hexsha (string)
size (int64)
ext (string)
lang (string)
max_stars_repo_path (string)
max_stars_repo_name (string)
max_stars_repo_head_hexsha (string)
max_stars_repo_licenses (list)
max_stars_count (int64)
max_stars_repo_stars_event_min_datetime (string)
max_stars_repo_stars_event_max_datetime (string)
max_issues_repo_path (string)
max_issues_repo_name (string)
max_issues_repo_head_hexsha (string)
max_issues_repo_licenses (list)
max_issues_count (int64)
max_issues_repo_issues_event_min_datetime (string)
max_issues_repo_issues_event_max_datetime (string)
max_forks_repo_path (string)
max_forks_repo_name (string)
max_forks_repo_head_hexsha (string)
max_forks_repo_licenses (list)
max_forks_count (int64)
max_forks_repo_forks_event_min_datetime (string)
max_forks_repo_forks_event_max_datetime (string)
content (string)
avg_line_length (float64)
max_line_length (int64)
alphanum_fraction (float64)
qsc_code_num_words_quality_signal (int64)
qsc_code_num_chars_quality_signal (float64)
qsc_code_mean_word_length_quality_signal (float64)
qsc_code_frac_words_unique_quality_signal (float64)
qsc_code_frac_chars_top_2grams_quality_signal (float64)
qsc_code_frac_chars_top_3grams_quality_signal (float64)
qsc_code_frac_chars_top_4grams_quality_signal (float64)
qsc_code_frac_chars_dupe_5grams_quality_signal (float64)
qsc_code_frac_chars_dupe_6grams_quality_signal (float64)
qsc_code_frac_chars_dupe_7grams_quality_signal (float64)
qsc_code_frac_chars_dupe_8grams_quality_signal (float64)
qsc_code_frac_chars_dupe_9grams_quality_signal (float64)
qsc_code_frac_chars_dupe_10grams_quality_signal (float64)
qsc_code_frac_chars_replacement_symbols_quality_signal (float64)
qsc_code_frac_chars_digital_quality_signal (float64)
qsc_code_frac_chars_whitespace_quality_signal (float64)
qsc_code_size_file_byte_quality_signal (float64)
qsc_code_num_lines_quality_signal (float64)
qsc_code_num_chars_line_max_quality_signal (float64)
qsc_code_num_chars_line_mean_quality_signal (float64)
qsc_code_frac_chars_alphabet_quality_signal (float64)
qsc_code_frac_chars_comments_quality_signal (float64)
qsc_code_cate_xml_start_quality_signal (float64)
qsc_code_frac_lines_dupe_lines_quality_signal (float64)
qsc_code_cate_autogen_quality_signal (float64)
qsc_code_frac_lines_long_string_quality_signal (float64)
qsc_code_frac_chars_string_length_quality_signal (float64)
qsc_code_frac_chars_long_word_length_quality_signal (float64)
qsc_code_frac_lines_string_concat_quality_signal (float64)
qsc_code_cate_encoded_data_quality_signal (float64)
qsc_code_frac_chars_hex_words_quality_signal (float64)
qsc_code_frac_lines_prompt_comments_quality_signal (float64)
qsc_code_frac_lines_assert_quality_signal (float64)
qsc_codepython_cate_ast_quality_signal (float64)
qsc_codepython_frac_lines_func_ratio_quality_signal (float64)
qsc_codepython_cate_var_zero_quality_signal (bool)
qsc_codepython_frac_lines_pass_quality_signal (float64)
qsc_codepython_frac_lines_import_quality_signal (float64)
qsc_codepython_frac_lines_simplefunc_quality_signal (float64)
qsc_codepython_score_lines_no_logic_quality_signal (float64)
qsc_codepython_frac_lines_print_quality_signal (float64)
qsc_code_num_words (int64)
qsc_code_num_chars (int64)
qsc_code_mean_word_length (int64)
qsc_code_frac_words_unique (null)
qsc_code_frac_chars_top_2grams (int64)
qsc_code_frac_chars_top_3grams (int64)
qsc_code_frac_chars_top_4grams (int64)
qsc_code_frac_chars_dupe_5grams (int64)
qsc_code_frac_chars_dupe_6grams (int64)
qsc_code_frac_chars_dupe_7grams (int64)
qsc_code_frac_chars_dupe_8grams (int64)
qsc_code_frac_chars_dupe_9grams (int64)
qsc_code_frac_chars_dupe_10grams (int64)
qsc_code_frac_chars_replacement_symbols (int64)
qsc_code_frac_chars_digital (int64)
qsc_code_frac_chars_whitespace (int64)
qsc_code_size_file_byte (int64)
qsc_code_num_lines (int64)
qsc_code_num_chars_line_max (int64)
qsc_code_num_chars_line_mean (int64)
qsc_code_frac_chars_alphabet (int64)
qsc_code_frac_chars_comments (int64)
qsc_code_cate_xml_start (int64)
qsc_code_frac_lines_dupe_lines (int64)
qsc_code_cate_autogen (int64)
qsc_code_frac_lines_long_string (int64)
qsc_code_frac_chars_string_length (int64)
qsc_code_frac_chars_long_word_length (int64)
qsc_code_frac_lines_string_concat (null)
qsc_code_cate_encoded_data (int64)
qsc_code_frac_chars_hex_words (int64)
qsc_code_frac_lines_prompt_comments (int64)
qsc_code_frac_lines_assert (int64)
qsc_codepython_cate_ast (int64)
qsc_codepython_frac_lines_func_ratio (int64)
qsc_codepython_cate_var_zero (int64)
qsc_codepython_frac_lines_pass (int64)
qsc_codepython_frac_lines_import (int64)
qsc_codepython_frac_lines_simplefunc (int64)
qsc_codepython_score_lines_no_logic (int64)
qsc_codepython_frac_lines_print (int64)
effective (string)
hits (int64)
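The rows that follow repeat this schema once per file. A minimal sketch of how rows with these columns might be consumed, assuming they are materialized as a Parquet shard readable with pandas; the shard path and the filter thresholds below are placeholders, not part of this dump:

```python
# Minimal sketch, not part of the dump: assumes the rows are stored as a Parquet
# shard; "code_rows.parquet" and the thresholds are illustrative placeholders.
import pandas as pd

df = pd.read_parquet("code_rows.parquet")  # hypothetical shard path

# Example filter over a few of the quality-signal columns listed above.
keep = df[
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1.0)          # presumably: parses to a valid AST
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)  # low duplicated-10-gram fraction
]

for _, row in keep.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```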
hexsha: 0a3ee6851d0a7ef05afcdf6a271167449fef0269
size: 7,867
ext: py
lang: Python
max_stars_repo_path: lib/layers/functions/prior_box.py
max_stars_repo_name: arleyzhang/object-detection-pytorch
max_stars_repo_head_hexsha: de96a507e6643a7019b94d92f77219439ccca29f
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2018-10-10T03:06:38.000Z
max_stars_repo_stars_event_max_datetime: 2018-12-18T07:32:30.000Z
max_issues_repo_path: lib/layers/functions/prior_box.py
max_issues_repo_name: arleyzhang/object-detection-pytorch
max_issues_repo_head_hexsha: de96a507e6643a7019b94d92f77219439ccca29f
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: lib/layers/functions/prior_box.py
max_forks_repo_name: arleyzhang/object-detection-pytorch
max_forks_repo_head_hexsha: de96a507e6643a7019b94d92f77219439ccca29f
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2018-10-10T03:06:39.000Z
max_forks_repo_forks_event_max_datetime: 2018-10-10T03:06:39.000Z
content:
from __future__ import division from math import sqrt as sqrt from itertools import product as product import torch import numpy as np import cv2 from lib.utils.visualize_utils import TBWriter def vis(func): """tensorboard visualization if has writer as input""" def wrapper(*args, **kw): return func(*args, **kw) if kw['tb_writer'] is not None else None return wrapper class PriorBoxBase(object): """Compute priorbox coordinates in center-offset form for each source feature map. """ def __init__(self, cfg): super(PriorBoxBase, self).__init__() self.image_size = cfg.MODEL.IMAGE_SIZE self._steps = cfg.MODEL.STEPS self._cfg_list = [] self._prior_cfg = {} self._clip = cfg.MODEL.CLIP self._variance = cfg.MODEL.VARIANCE for v in self._variance: if v <= 0: raise ValueError('Variances must be greater than 0') def _setup(self, cfg): num_feat = len(self._steps) for item in self._cfg_list: if item not in cfg.MODEL: raise Exception("wrong anchor config!") if len(cfg.MODEL[item]) != num_feat and len(cfg.MODEL[item]) != 0: raise Exception("config {} length does not match step length!".format(item)) self._prior_cfg[item] = cfg.MODEL[item] @property def num_priors(self): """allow prior num calculation before knowing feature map size""" assert self._prior_cfg is not {} return [int(len(self._create_prior(0, 0, k)) / 4) for k in range(len(self._steps))] def _create_prior(self, cx, cy, k): raise NotImplementedError @vis def _image_proc(self, image=None, tb_writer=None): # TODO test with image if isinstance(image, type(None)): image = np.ones((self.image_size[1], self.image_size[0], 3)) elif isinstance(image, str): image = cv2.imread(image, -1) image = cv2.resize(image, (self.image_size[1], self.image_size[0])) return image @vis def _prior_vis(self, anchor, image_ori, feat_idx, tb_writer=None): # TODO add output path to the signature writer = tb_writer.writer prior_num = self.num_priors[feat_idx] # transform coordinates scale = [self.image_size[1], self.image_size[0], self.image_size[1], self.image_size[0]] bboxs = np.array(anchor).reshape((-1, 4)) box_centers = bboxs[:, :2] * scale[:2] # [x, y] # bboxs: [xmin, ymin, xmax, ymax] bboxs = np.hstack((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2)) * scale box_centers = box_centers.astype(np.int32) bboxs = bboxs.astype(np.int32) # visualize each anchor box on a feature map for prior_idx in range(prior_num): image = image_ori.copy() bboxs_ = bboxs[prior_idx::prior_num, :] box_centers_ = box_centers[4 * prior_idx::prior_num, :] for archor, bbox in zip(box_centers_, bboxs_): cv2.circle(image, (archor[0], archor[1]), 1, (0, 0, 255), -1) if archor[0] == archor[1]: # only show diagnal anchors cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 1) image = image[..., ::-1] image = image.transpose((2,0,1)) writer.add_image('base/feature_map_{}_{}'.format(feat_idx, prior_idx), image, 2) def forward(self, layer_dims, tb_writer=None, image=None): priors = [] image = self._image_proc(image=image, tb_writer=tb_writer) for k in range(len(layer_dims)): prior = [] for i, j in product(range(layer_dims[k][0]), range(layer_dims[k][1])): steps_x = self.image_size[1] / self._steps[k] steps_y = self.image_size[0] / self._steps[k] cx = (j + 0.5) / steps_x # unit center x,y cy = (i + 0.5) / steps_y prior += self._create_prior(cx, cy, k) priors += prior self._prior_vis(prior, image, k, tb_writer=tb_writer) output = torch.Tensor(priors).view(-1, 4) # TODO this clip is meanless, should clip on [xmin, ymin, xmax, ymax] if self._clip: 
output.clamp_(max=1, min=0) return output class PriorBoxSSD(PriorBoxBase): def __init__(self, cfg): super(PriorBoxSSD, self).__init__(cfg) # self.image_size = cfg['image_size'] self._cfg_list = ['MIN_SIZES', 'MAX_SIZES', 'ASPECT_RATIOS'] self._flip = cfg.MODEL.FLIP self._setup(cfg) def _create_prior(self, cx, cy, k): # as the original paper do prior = [] min_sizes = self._prior_cfg['MIN_SIZES'][k] min_sizes = [min_sizes] if not isinstance(min_sizes, list) else min_sizes for ms in min_sizes: # min square s_i = ms / self.image_size[0] s_j = ms / self.image_size[1] prior += [cx, cy, s_j, s_i] # min max square if len(self._prior_cfg['MAX_SIZES']) != 0: assert type(self._prior_cfg['MAX_SIZES'][k]) is not list # one max size per layer s_i_prime = sqrt(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0])) s_j_prime = sqrt(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1])) prior += [cx, cy, s_j_prime, s_i_prime] # rectangles by min and aspect ratio for ar in self._prior_cfg['ASPECT_RATIOS'][k]: prior += [cx, cy, s_j * sqrt(ar), s_i / sqrt(ar)] # a vertical box if self._flip: prior += [cx, cy, s_j / sqrt(ar), s_i * sqrt(ar)] return prior # PriorBox = PriorBoxSSD def test_no_vis(cfg, tb_writer): cfg = copy.deepcopy(cfg) cfg['feature_maps'] = [38, 19, 10, 5, 3, 1] cfg['min_sizes'] = [[30], [60], 111, 162, 213, 264] cfg['flip'] = True feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])] p = PriorBoxSSD(cfg) print(p.num_priors) p1 = p.forward(feat_dim) print(p1) def test_filp(cfg, tb_writer): cfg = copy.deepcopy(cfg) cfg['feature_maps'] = [38, 19, 10, 5, 3, 1] cfg['flip'] = True feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])] p = PriorBoxSSD(cfg) p1 = p.forward(feat_dim, tb_writer=tb_writer) cfg['flip'] = False cfg['aspect_ratios'] = [[2, 1 / 2], [2, 1 / 2, 3, 1 / 3], [2, 1 / 2, 3, 1 / 3], [2, 1 / 2, 3, 1 / 3], [2, 1 / 2], [2, 1 / 2]] p = PriorBox(cfg) p2 = p.forward(feat_dim, tb_writer=tb_writer) # print(p2) assert (p2 - p1).sum() < 1e-8 def test_rectangle(cfg, tb_writer): cfg = copy.deepcopy(cfg) cfg['feature_maps'] = [38, 19, 10, 5, 3, 1] cfg['min_sizes'] = [30, 60, 111, 162, 213, 264] cfg['flip'] = True # feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])] # cfg['image_size'] = [300, 300] # feat_dim = [list(a) for a in zip(cfg['feature_maps'], [item * 2 for item in cfg['feature_maps']])] # cfg['image_size'] = [300, 600] feat_dim = [list(a) for a in zip([item * 2 for item in cfg['feature_maps']], cfg['feature_maps'])] cfg['image_size'] = [600, 300] p = PriorBoxSSD(cfg) p1 = p.forward(feat_dim, tb_writer=tb_writer) print(p1.shape) if __name__ == '__main__': import copy # from lib.datasets.config import ssd_voc_vgg as cfg # from lib.utils.visualize_utils import TBWriter # tb_writer = TBWriter(log_dir, {'epoch': 50}) # # test_no_vis(cfg, tb_writer) # test_filp(cfg, tb_writer) # test_rectangle(cfg, tb_writer) print('haha') from lib.utils.config import cfg print(cfg)
avg_line_length: 38.004831
max_line_length: 104
alphanum_fraction: 0.583196
qsc_code_num_words_quality_signal: 1,150
qsc_code_num_chars_quality_signal: 7,867
qsc_code_mean_word_length_quality_signal: 3.784348
qsc_code_frac_words_unique_quality_signal: 0.195652
qsc_code_frac_chars_top_2grams_quality_signal: 0.040441
qsc_code_frac_chars_top_3grams_quality_signal: 0.047794
qsc_code_frac_chars_top_4grams_quality_signal: 0.022518
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.328585
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.281939
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.258272
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.225873
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.195313
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.139476
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.035149
qsc_code_frac_chars_whitespace_quality_signal: 0.276726
qsc_code_size_file_byte_quality_signal: 7,867
qsc_code_num_lines_quality_signal: 206
qsc_code_num_chars_line_max_quality_signal: 105
qsc_code_num_chars_line_mean_quality_signal: 38.18932
qsc_code_frac_chars_alphabet_quality_signal: 0.729701
qsc_code_frac_chars_comments_quality_signal: 0.142367
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.165517
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.058718
qsc_code_frac_chars_long_word_length_quality_signal: 0.003287
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0.004854
qsc_code_frac_lines_assert_quality_signal: 0.02069
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.096552
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.062069
qsc_codepython_frac_lines_simplefunc_quality_signal: 0.006897
qsc_codepython_score_lines_no_logic_quality_signal: 0.213793
qsc_codepython_frac_lines_print_quality_signal: 0.034483
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 1
hits: 0
hexsha: 0a3fe7e6abe2393d5617b3058cbf7b54468e33ee
size: 5,073
ext: py
lang: Python
max_stars_repo_path: python/Gaffer/SequencePath.py
max_stars_repo_name: cwmartin/gaffer
max_stars_repo_head_hexsha: 1f8a0f75522105c9d5efefac6d55cb61c1038909
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: python/Gaffer/SequencePath.py
max_issues_repo_name: cwmartin/gaffer
max_issues_repo_head_hexsha: 1f8a0f75522105c9d5efefac6d55cb61c1038909
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python/Gaffer/SequencePath.py
max_forks_repo_name: cwmartin/gaffer
max_forks_repo_head_hexsha: 1f8a0f75522105c9d5efefac6d55cb61c1038909
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
########################################################################## # # Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import IECore import Gaffer class SequencePath( Gaffer.Path ) : def __init__( self, path, root="/", minSequenceSize=1, filter=None ) : if not isinstance( path, Gaffer.Path ) : path = Gaffer.FileSystemPath( path, root ) Gaffer.Path.__init__( self, path[:], path.root(), filter=filter ) # we use the seed for creating base paths whenever we need them self.__basePathSeed = path self.__minSequenceSize = minSequenceSize def isValid( self ) : for p in self.__basePaths() : if not p.isValid() : return False return True def isLeaf( self ) : for p in self.__basePaths() : if not p.isLeaf() : return False return True def info( self ) : result = Gaffer.Path.info( self ) if result is None : return None def average( values ) : return sum( values ) / len( values ) def mostCommon( values ) : counter = {} for value in values : if value in counter : counter[value] += 1 else : counter[value] = 1 maxCount = 0 mostCommonValue = None for value, count in counter.items() : if count > maxCount : mostCommonValue = value maxCount = count return mostCommonValue combiners = { "fileSystem:owner" : mostCommon, "fileSystem:group" : mostCommon, "fileSystem:modificationTime" : max, "fileSystem:accessTime" : max, "fileSystem:size" : sum, } infos = [ path.info() for path in self.__basePaths() ] if len( infos ) : for key, exampleValue in infos[0].items() : if key in result : continue combiner = combiners.get( key, None ) if combiner is None : if isinstance( exampleValue, ( int, float ) ) : combiner = average elif isinstance( exampleValue, basestring ) : combiner = mostCommon if combiner is not None : values = [ i[key] for i in infos ] result[key] = combiner( values ) return result def _children( self ) : p = self.__basePath( self ) children = p.children() nonLeafPaths = [] leafPathStrings = [] for child in children : if child.isLeaf() : 
leafPathStrings.append( str( child ) ) else : nonLeafPaths.append( child ) sequences = IECore.findSequences( leafPathStrings, self.__minSequenceSize ) result = [] for path in sequences + nonLeafPaths : result.append( SequencePath( self.__basePath( str( path ) ), minSequenceSize=self.__minSequenceSize, filter = self.getFilter() ) ) return result def copy( self ) : result = SequencePath( self.__basePathSeed, minSequenceSize = self.__minSequenceSize, filter = self.getFilter() ) result.setFromPath( self ) return result def __basePath( self, path ) : result = self.__basePathSeed.copy() if isinstance( path, basestring ) : result.setFromString( path ) else : result.setFromPath( path ) return result def __basePaths( self ) : sequence = None with IECore.IgnoredExceptions( Exception ) : sequence = IECore.FileSequence( str( self ) ) result = [] if sequence : for f in sequence.fileNames() : result.append( self.__basePath( f ) ) else : result.append( self.__basePath( self ) ) return result def __isSequence( self ) : s = str( self ) if IECore.FileSequence.fileNameValidator().match( s ) : return True return False
avg_line_length: 27.721311
max_line_length: 133
alphanum_fraction: 0.668244
qsc_code_num_words_quality_signal: 598
qsc_code_num_chars_quality_signal: 5,073
qsc_code_mean_word_length_quality_signal: 5.59699
qsc_code_frac_words_unique_quality_signal: 0.359532
qsc_code_frac_chars_top_2grams_quality_signal: 0.017927
qsc_code_frac_chars_top_3grams_quality_signal: 0.022408
qsc_code_frac_chars_top_4grams_quality_signal: 0.015238
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.118315
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.089633
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.057962
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.057962
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.057962
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.040633
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.003289
qsc_code_frac_chars_whitespace_quality_signal: 0.220974
qsc_code_size_file_byte_quality_signal: 5,073
qsc_code_num_lines_quality_signal: 182
qsc_code_num_chars_line_max_quality_signal: 134
qsc_code_num_chars_line_mean_quality_signal: 27.873626
qsc_code_frac_chars_alphabet_quality_signal: 0.843623
qsc_code_frac_chars_comments_quality_signal: 0.325448
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.184466
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.029648
qsc_code_frac_chars_long_word_length_quality_signal: 0.014824
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.106796
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.019417
qsc_codepython_frac_lines_simplefunc_quality_signal: 0.009709
qsc_codepython_score_lines_no_logic_quality_signal: 0.271845
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 1
hits: 0
hexsha: 0a42c30234b3cb9b1bf3706f896598d1f485e00b
size: 7,765
ext: py
lang: Python
max_stars_repo_path: PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py
max_stars_repo_name: ckamtsikis/cmssw
max_stars_repo_head_hexsha: ea19fe642bb7537cbf58451dcf73aa5fd1b66250
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 852
max_stars_repo_stars_event_min_datetime: 2015-01-11T21:03:51.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-25T21:14:00.000Z
max_issues_repo_path: PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py
max_issues_repo_name: ckamtsikis/cmssw
max_issues_repo_head_hexsha: ea19fe642bb7537cbf58451dcf73aa5fd1b66250
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 30,371
max_issues_repo_issues_event_min_datetime: 2015-01-02T00:14:40.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T23:26:05.000Z
max_forks_repo_path: PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py
max_forks_repo_name: ckamtsikis/cmssw
max_forks_repo_head_hexsha: ea19fe642bb7537cbf58451dcf73aa5fd1b66250
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 3,240
max_forks_repo_forks_event_min_datetime: 2015-01-02T05:53:18.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-31T17:24:21.000Z
content:
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle from PhysicsTools.Heppy.physicsobjects.Tau import Tau from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3 import PhysicsTools.HeppyCore.framework.config as cfg class TauAnalyzer( Analyzer ): def __init__(self, cfg_ana, cfg_comp, looperName ): super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName) #---------------------------------------- # DECLARATION OF HANDLES OF LEPTONS STUFF #---------------------------------------- def declareHandles(self): super(TauAnalyzer, self).declareHandles() self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>') def beginLoop(self, setup): super(TauAnalyzer,self).beginLoop(setup) self.counters.addCounter('events') count = self.counters.counter('events') count.register('all events') count.register('has >=1 tau at preselection') count.register('has >=1 selected taus') count.register('has >=1 other taus') #------------------ # MAKE LEPTON LISTS #------------------ def makeTaus(self, event): event.inclusiveTaus = [] event.selectedTaus = [] event.otherTaus = [] #get all alltaus = map( Tau, self.handles['taus'].product() ) #make inclusive taus for tau in alltaus: tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0] tau.lepVeto = False tau.idDecayMode = tau.tauID("decayModeFinding") tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs") if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID): continue tau.inclusive_lepVeto = False if self.cfg_ana.inclusive_vetoLeptons: for lep in event.selectedLeptons: if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR: tau.inclusive_lepVeto = True if tau.inclusive_lepVeto: continue if self.cfg_ana.inclusive_vetoLeptonsPOG: if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID): tau.inclusive_lepVeto = True if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID): tau.inclusive_lepVeto = True if tau.inclusive_lepVeto: continue if tau.pt() < self.cfg_ana.inclusive_ptMin: continue if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue def id3(tau,X): """Create an integer equal to 1-2-3 for (loose,medium,tight)""" return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight") def id5(tau,X): """Create an integer equal to 1-2-3-4-5 for (very loose, loose, medium, tight, very tight)""" return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight") def id6(tau,X): """Create an integer equal to 1-2-3-4-5-6 for (very loose, loose, medium, tight, very tight, very very tight)""" return id5(tau, X) + tau.tauID(X%"VVTight") tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT") tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT") tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits") tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3") tau.idAntiE = id5(tau, "againstElectron%sMVA6") #print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID)) if tau.tauID(self.cfg_ana.inclusive_tauID): event.inclusiveTaus.append(tau) for tau in event.inclusiveTaus: tau.loose_lepVeto = False if self.cfg_ana.loose_vetoLeptons: for 
lep in event.selectedLeptons: if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR: tau.loose_lepVeto = True if self.cfg_ana.loose_vetoLeptonsPOG: if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID): tau.loose_lepVeto = True if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID): tau.loose_lepVeto = True if tau.tauID(self.cfg_ana.loose_decayModeID) and \ tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \ abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \ tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto: event.selectedTaus.append(tau) else: event.otherTaus.append(tau) event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True) event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True) event.otherTaus.sort(key = lambda l : l.pt(), reverse = True) self.counters.counter('events').inc('all events') if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection') if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus') if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus') def matchTaus(self, event): match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5) for lep in event.inclusiveTaus: gen = match[lep] lep.mcMatchId = 1 if gen else 0 lep.genp = gen def process(self, event): self.readCollections( event.input ) self.makeTaus(event) if not self.cfg_comp.isMC: return True if hasattr(event, 'gentaus'): self.matchTaus(event) return True # Find the definitions of the tau ID strings here: # http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer( class_object = TauAnalyzer, # inclusive very loose hadronic tau selection inclusive_ptMin = 18, inclusive_etaMax = 9999, inclusive_dxyMax = 1000., inclusive_dzMax = 0.4, inclusive_vetoLeptons = False, inclusive_leptonVetoDR = 0.4, inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or "" inclusive_tauID = "decayModeFindingNewDMs", inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required inclusive_tauAntiMuonID = "", inclusive_tauAntiElectronID = "", # loose hadronic tau selection loose_ptMin = 18, loose_etaMax = 9999, loose_dxyMax = 1000., loose_dzMax = 0.2, loose_vetoLeptons = True, loose_leptonVetoDR = 0.4, loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or "" loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits", loose_vetoLeptonsPOG = False, # If True, the following two IDs are required loose_tauAntiMuonID = "againstMuonLoose3", loose_tauAntiElectronID = "againstElectronLooseMVA5" ) )
avg_line_length: 45.145349
max_line_length: 171
alphanum_fraction: 0.619446
qsc_code_num_words_quality_signal: 890
qsc_code_num_chars_quality_signal: 7,765
qsc_code_mean_word_length_quality_signal: 5.294382
qsc_code_frac_words_unique_quality_signal: 0.224719
qsc_code_frac_chars_top_2grams_quality_signal: 0.041596
qsc_code_frac_chars_top_3grams_quality_signal: 0.057301
qsc_code_frac_chars_top_4grams_quality_signal: 0.052419
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.366511
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.283531
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.219228
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.171265
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.127971
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.099533
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.014777
qsc_code_frac_chars_whitespace_quality_signal: 0.25924
qsc_code_size_file_byte_quality_signal: 7,765
qsc_code_num_lines_quality_signal: 171
qsc_code_num_chars_line_max_quality_signal: 172
qsc_code_num_chars_line_mean_quality_signal: 45.409357
qsc_code_frac_chars_alphabet_quality_signal: 0.804416
qsc_code_frac_chars_comments_quality_signal: 0.125435
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.096774
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.096592
qsc_code_frac_chars_long_word_length_quality_signal: 0.047626
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.072581
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.040323
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.16129
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 1
hits: 0
hexsha: 0a42e80075481314be34a3f3fd3ff44396a763e9
size: 4,020
ext: py
lang: Python
max_stars_repo_path: vize/170401038.py
max_stars_repo_name: omuryorulmaz/kriptografi
max_stars_repo_head_hexsha: 04c22e4f05f126b14f41842597a7b27065326670
max_stars_repo_licenses: [ "Unlicense" ]
max_stars_count: 8
max_stars_repo_stars_event_min_datetime: 2020-04-15T12:06:42.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-21T10:35:51.000Z
max_issues_repo_path: vize/170401038.py
max_issues_repo_name: omuryorulmaz/kriptografi
max_issues_repo_head_hexsha: 04c22e4f05f126b14f41842597a7b27065326670
max_issues_repo_licenses: [ "Unlicense" ]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2020-05-13T20:41:27.000Z
max_issues_repo_issues_event_max_datetime: 2020-06-11T00:45:27.000Z
max_forks_repo_path: vize/170401038.py
max_forks_repo_name: omuryorulmaz/kriptografi
max_forks_repo_head_hexsha: 04c22e4f05f126b14f41842597a7b27065326670
max_forks_repo_licenses: [ "Unlicense" ]
max_forks_count: 54
max_forks_repo_forks_event_min_datetime: 2020-04-23T14:58:50.000Z
max_forks_repo_forks_event_max_datetime: 2020-06-26T06:00:32.000Z
content:
# İsmail ALTAY 170401038 import math import random r = 3271 def egcd(a,b): if(a == 0): return(b,0,1) else: c,d,e = egcd(b % a, a) return(c, e - (b // a) * d, d) def modInvert(a,b): c,d,e = egcd(a,b) if c != 1: raise Exception('moduler ters bulunamadi') else: return d % b def randomInteger(n): return random.randrange(2 ** (n-1), 2 ** n) | 1 def RabinMiller(f): s = 5 if(f == 2): return 1 if not (f & 1): return 0 p = f-1 u = 0 r = f-1 while (r%2 == 0): r >>= 1 u+=1 def Control(a): z = pow(a, r, f) if z == 1: return 0 for i in range(u): z = pow(a, (2**i) * r, f-1) if z == p: return 0 return 1 for i in range(s): a = random.randrange(2, p-2) if Control(a): return 0 return 1 def Keygen(n): while True: p = randomInteger(n//2) if (p - 1) % r == 0 and RabinMiller(p) and math.gcd(r, int((p - 1) / r)) == 1: break while True: q = randomInteger(n//2) if RabinMiller(q) and math.gcd(r, int(q - 1)) == 1: break N = p * q phi = (p - 1) * (q - 1) while True: y = random.randrange(1, N) if math.gcd(y, N) == 1: x = pow(y, phi * modInvert(r, N) % N, N) if x != 1: break publicKeyFile = open("publickey.txt", "w+") publicKeyFile.write(str(N) + "\n" + str(y)) publicKeyFile.close() privateKeyFile = open("privatekey.txt", "w+") privateKeyFile.write(str(phi) + "\n" + str(x) + "\n" + str(N)) privateKeyFile.close() def encrypt(plaintext, publickeytxt): try: open(publickeytxt, "r") except FileNotFoundError: print("Anahtar çiftleri oluşturulmadan şifrelme işlemi yapılamaz. Lütfen önce Keygen fonksiyonunu çalıştırın.") else: publicKeyFile = open(publickeytxt, "r") N, y = publicKeyFile.read().split("\n") N = int(N) y = int(y) publicKeyFile.close() plainTextFile = open(plaintext, "r") plainCopy = int(plainTextFile.read().split("\n")[0]) plainTextFile.close() while True: u = random.randrange(1, int(N)) if math.gcd(y, N) == 1: break cipherText = pow(y, plainCopy, N) * pow(u, r, N) % N cipherTextFile = open("ciphertext.txt", "w+") cipherTextFile.write(str(cipherText)) cipherTextFile.close() def decrypt(ciphertext, privatekeytxt): try: open(privatekeytxt, "r") except FileNotFoundError: print("Anahtar çiftleri oluşturulmadan deşifreleme işlemi yapılamz. Lütfen önce Keygen fonksiyonunu çalıştırın.") else: privateKeyFile = open(privatekeytxt, "r") phi, x, N = privateKeyFile.read().split("\n") phi, x, N = int(phi), int(x), int(N) privateKeyFile.close() cipherTextFile = open(ciphertext, "r") cipherCopy = int(cipherTextFile.read()) a = pow(cipherCopy, (phi * modInvert(r, N)) % N, N) for i in range(r -1): if(pow(x, i, N) == a): break plainText2File = open("plaintext2.txt", "w+") plainText2File.write(str(i)) plainText2File.close() plain2File = open("plaintext2.txt", "r") plain1File = open("plaintext.txt", "r") plain1 = plain1File.read().split("\n")[0] plain2 = plain2File.read().split("\n")[0] if plain1 == plain2: print("Dosyalar Özdeştir..") else: print("Dosyalar özdeş değildir..") n = int(input("Oluşturulmak istenen anahtar çiftlerinin bit uzunluğunu girin: ")) Keygen(n) encrypt("plaintext.txt","publickey.txt") decrypt("ciphertext.txt", "privatekey.txt")
avg_line_length: 26.447368
max_line_length: 122
alphanum_fraction: 0.510945
qsc_code_num_words_quality_signal: 495
qsc_code_num_chars_quality_signal: 4,020
qsc_code_mean_word_length_quality_signal: 4.151515
qsc_code_frac_words_unique_quality_signal: 0.224242
qsc_code_frac_chars_top_2grams_quality_signal: 0.006813
qsc_code_frac_chars_top_3grams_quality_signal: 0.024331
qsc_code_frac_chars_top_4grams_quality_signal: 0.016058
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.139173
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.125547
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.0691
qsc_code_frac_chars_dupe_8grams_quality_signal: 0
qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.028485
qsc_code_frac_chars_whitespace_quality_signal: 0.345025
qsc_code_size_file_byte_quality_signal: 4,020
qsc_code_num_lines_quality_signal: 151
qsc_code_num_chars_line_max_quality_signal: 123
qsc_code_num_chars_line_mean_quality_signal: 26.622517
qsc_code_frac_chars_alphabet_quality_signal: 0.751614
qsc_code_frac_chars_comments_quality_signal: 0.005473
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.260504
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.131079
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.067227
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.016807
qsc_codepython_frac_lines_simplefunc_quality_signal: 0.008403
qsc_codepython_score_lines_no_logic_quality_signal: 0.159664
qsc_codepython_frac_lines_print_quality_signal: 0.033613
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 1
hits: 0
hexsha: 0a433c84b0dfa57ea11f80f51e65908aaa8c4377
size: 87,582
ext: py
lang: Python
max_stars_repo_path: sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models.py
max_stars_repo_name: adewaleo/azure-sdk-for-python
max_stars_repo_head_hexsha: 169457edbea5e3c5557246cfcf8bd635d528bae4
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-03-05T18:10:35.000Z
max_stars_repo_stars_event_max_datetime: 2020-03-05T18:10:35.000Z
max_issues_repo_path: sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models.py
max_issues_repo_name: adewaleo/azure-sdk-for-python
max_issues_repo_head_hexsha: 169457edbea5e3c5557246cfcf8bd635d528bae4
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 2
max_issues_repo_issues_event_min_datetime: 2020-03-03T23:11:13.000Z
max_issues_repo_issues_event_max_datetime: 2020-03-30T18:50:55.000Z
max_forks_repo_path: sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models.py
max_forks_repo_name: adewaleo/azure-sdk-for-python
max_forks_repo_head_hexsha: 169457edbea5e3c5557246cfcf8bd635d528bae4
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization class CertificateBodyDescription(msrest.serialization.Model): """The JSON-serialized X509 Certificate. :param certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem file content. :type certificate: str """ _attribute_map = { 'certificate': {'key': 'certificate', 'type': 'str'}, } def __init__( self, **kwargs ): super(CertificateBodyDescription, self).__init__(**kwargs) self.certificate = kwargs.get('certificate', None) class CertificateDescription(msrest.serialization.Model): """The X509 Certificate. Variables are only populated by the server, and will be ignored when sending a request. :param properties: The description of an X509 CA Certificate. :type properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificateProperties :ivar id: The resource identifier. :vartype id: str :ivar name: The name of the certificate. :vartype name: str :ivar etag: The entity tag. :vartype etag: str :ivar type: The resource type. :vartype type: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'etag': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'properties': {'key': 'properties', 'type': 'CertificateProperties'}, 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(CertificateDescription, self).__init__(**kwargs) self.properties = kwargs.get('properties', None) self.id = None self.name = None self.etag = None self.type = None class CertificateListDescription(msrest.serialization.Model): """The JSON-serialized array of Certificate objects. :param value: The array of Certificate objects. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.CertificateDescription] """ _attribute_map = { 'value': {'key': 'value', 'type': '[CertificateDescription]'}, } def __init__( self, **kwargs ): super(CertificateListDescription, self).__init__(**kwargs) self.value = kwargs.get('value', None) class CertificateProperties(msrest.serialization.Model): """The description of an X509 CA Certificate. Variables are only populated by the server, and will be ignored when sending a request. :ivar subject: The certificate's subject name. :vartype subject: str :ivar expiry: The certificate's expiration date and time. :vartype expiry: ~datetime.datetime :ivar thumbprint: The certificate's thumbprint. :vartype thumbprint: str :ivar is_verified: Determines whether certificate has been verified. :vartype is_verified: bool :ivar created: The certificate's create date and time. :vartype created: ~datetime.datetime :ivar updated: The certificate's last update date and time. :vartype updated: ~datetime.datetime :param certificate: The certificate content. 
:type certificate: str """ _validation = { 'subject': {'readonly': True}, 'expiry': {'readonly': True}, 'thumbprint': {'readonly': True}, 'is_verified': {'readonly': True}, 'created': {'readonly': True}, 'updated': {'readonly': True}, } _attribute_map = { 'subject': {'key': 'subject', 'type': 'str'}, 'expiry': {'key': 'expiry', 'type': 'rfc-1123'}, 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, 'is_verified': {'key': 'isVerified', 'type': 'bool'}, 'created': {'key': 'created', 'type': 'rfc-1123'}, 'updated': {'key': 'updated', 'type': 'rfc-1123'}, 'certificate': {'key': 'certificate', 'type': 'str'}, } def __init__( self, **kwargs ): super(CertificateProperties, self).__init__(**kwargs) self.subject = None self.expiry = None self.thumbprint = None self.is_verified = None self.created = None self.updated = None self.certificate = kwargs.get('certificate', None) class CertificatePropertiesWithNonce(msrest.serialization.Model): """The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow. Variables are only populated by the server, and will be ignored when sending a request. :ivar subject: The certificate's subject name. :vartype subject: str :ivar expiry: The certificate's expiration date and time. :vartype expiry: ~datetime.datetime :ivar thumbprint: The certificate's thumbprint. :vartype thumbprint: str :ivar is_verified: Determines whether certificate has been verified. :vartype is_verified: bool :ivar created: The certificate's create date and time. :vartype created: ~datetime.datetime :ivar updated: The certificate's last update date and time. :vartype updated: ~datetime.datetime :ivar verification_code: The certificate's verification code that will be used for proof of possession. :vartype verification_code: str :ivar certificate: The certificate content. :vartype certificate: str """ _validation = { 'subject': {'readonly': True}, 'expiry': {'readonly': True}, 'thumbprint': {'readonly': True}, 'is_verified': {'readonly': True}, 'created': {'readonly': True}, 'updated': {'readonly': True}, 'verification_code': {'readonly': True}, 'certificate': {'readonly': True}, } _attribute_map = { 'subject': {'key': 'subject', 'type': 'str'}, 'expiry': {'key': 'expiry', 'type': 'rfc-1123'}, 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, 'is_verified': {'key': 'isVerified', 'type': 'bool'}, 'created': {'key': 'created', 'type': 'rfc-1123'}, 'updated': {'key': 'updated', 'type': 'rfc-1123'}, 'verification_code': {'key': 'verificationCode', 'type': 'str'}, 'certificate': {'key': 'certificate', 'type': 'str'}, } def __init__( self, **kwargs ): super(CertificatePropertiesWithNonce, self).__init__(**kwargs) self.subject = None self.expiry = None self.thumbprint = None self.is_verified = None self.created = None self.updated = None self.verification_code = None self.certificate = None class CertificateVerificationDescription(msrest.serialization.Model): """The JSON-serialized leaf certificate. :param certificate: base-64 representation of X509 certificate .cer file or just .pem file content. :type certificate: str """ _attribute_map = { 'certificate': {'key': 'certificate', 'type': 'str'}, } def __init__( self, **kwargs ): super(CertificateVerificationDescription, self).__init__(**kwargs) self.certificate = kwargs.get('certificate', None) class CertificateWithNonceDescription(msrest.serialization.Model): """The X509 Certificate. Variables are only populated by the server, and will be ignored when sending a request. 
:param properties: The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow. :type properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificatePropertiesWithNonce :ivar id: The resource identifier. :vartype id: str :ivar name: The name of the certificate. :vartype name: str :ivar etag: The entity tag. :vartype etag: str :ivar type: The resource type. :vartype type: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'etag': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'properties': {'key': 'properties', 'type': 'CertificatePropertiesWithNonce'}, 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(CertificateWithNonceDescription, self).__init__(**kwargs) self.properties = kwargs.get('properties', None) self.id = None self.name = None self.etag = None self.type = None class CloudToDeviceProperties(msrest.serialization.Model): """The IoT hub cloud-to-device messaging properties. :param max_delivery_count: The max delivery count for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to- device-messages. :type max_delivery_count: int :param default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud- to-device-messages. :type default_ttl_as_iso8601: ~datetime.timedelta :param feedback: The properties of the feedback queue for cloud-to-device messages. :type feedback: ~azure.mgmt.iothub.v2019_11_04.models.FeedbackProperties """ _validation = { 'max_delivery_count': {'maximum': 100, 'minimum': 1}, } _attribute_map = { 'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'}, 'default_ttl_as_iso8601': {'key': 'defaultTtlAsIso8601', 'type': 'duration'}, 'feedback': {'key': 'feedback', 'type': 'FeedbackProperties'}, } def __init__( self, **kwargs ): super(CloudToDeviceProperties, self).__init__(**kwargs) self.max_delivery_count = kwargs.get('max_delivery_count', None) self.default_ttl_as_iso8601 = kwargs.get('default_ttl_as_iso8601', None) self.feedback = kwargs.get('feedback', None) class EndpointHealthData(msrest.serialization.Model): """The health data for an endpoint. :param endpoint_id: Id of the endpoint. :type endpoint_id: str :param health_status: Health statuses have following meanings. The 'healthy' status shows that the endpoint is accepting messages as expected. The 'unhealthy' status shows that the endpoint is not accepting messages as expected and IoT Hub is retrying to send data to this endpoint. The status of an unhealthy endpoint will be updated to healthy when IoT Hub has established an eventually consistent state of health. The 'dead' status shows that the endpoint is not accepting messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that the IoT Hub has not established a connection with the endpoint. No messages have been delivered to or rejected from this endpoint. Possible values include: "unknown", "healthy", "unhealthy", "dead". 
:type health_status: str or ~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthStatus """ _attribute_map = { 'endpoint_id': {'key': 'endpointId', 'type': 'str'}, 'health_status': {'key': 'healthStatus', 'type': 'str'}, } def __init__( self, **kwargs ): super(EndpointHealthData, self).__init__(**kwargs) self.endpoint_id = kwargs.get('endpoint_id', None) self.health_status = kwargs.get('health_status', None) class EndpointHealthDataListResult(msrest.serialization.Model): """The JSON-serialized array of EndpointHealthData objects with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: JSON-serialized array of Endpoint health data. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthData] :ivar next_link: Link to more results. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[EndpointHealthData]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(EndpointHealthDataListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class EnrichmentProperties(msrest.serialization.Model): """The properties of an enrichment that your IoT hub applies to messages delivered to endpoints. All required parameters must be populated in order to send to Azure. :param key: Required. The key or name for the enrichment property. :type key: str :param value: Required. The value for the enrichment property. :type value: str :param endpoint_names: Required. The list of endpoints for which the enrichment is applied to the message. :type endpoint_names: list[str] """ _validation = { 'key': {'required': True}, 'value': {'required': True}, 'endpoint_names': {'required': True, 'min_items': 1}, } _attribute_map = { 'key': {'key': 'key', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, 'endpoint_names': {'key': 'endpointNames', 'type': '[str]'}, } def __init__( self, **kwargs ): super(EnrichmentProperties, self).__init__(**kwargs) self.key = kwargs['key'] self.value = kwargs['value'] self.endpoint_names = kwargs['endpoint_names'] class ErrorDetails(msrest.serialization.Model): """Error details. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: The error code. :vartype code: str :ivar http_status_code: The HTTP status code. :vartype http_status_code: str :ivar message: The error message. :vartype message: str :ivar details: The error details. :vartype details: str """ _validation = { 'code': {'readonly': True}, 'http_status_code': {'readonly': True}, 'message': {'readonly': True}, 'details': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'http_status_code': {'key': 'httpStatusCode', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'details': {'key': 'details', 'type': 'str'}, } def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = None self.http_status_code = None self.message = None self.details = None class EventHubConsumerGroupInfo(msrest.serialization.Model): """The properties of the EventHubConsumerGroupInfo object. Variables are only populated by the server, and will be ignored when sending a request. :param properties: The tags. :type properties: dict[str, str] :ivar id: The Event Hub-compatible consumer group identifier. :vartype id: str :ivar name: The Event Hub-compatible consumer group name. 
:vartype name: str :ivar type: the resource type. :vartype type: str :ivar etag: The etag. :vartype etag: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, } _attribute_map = { 'properties': {'key': 'properties', 'type': '{str}'}, 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, } def __init__( self, **kwargs ): super(EventHubConsumerGroupInfo, self).__init__(**kwargs) self.properties = kwargs.get('properties', None) self.id = None self.name = None self.type = None self.etag = None class EventHubConsumerGroupsListResult(msrest.serialization.Model): """The JSON-serialized array of Event Hub-compatible consumer group names with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: List of consumer groups objects. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.EventHubConsumerGroupInfo] :ivar next_link: The next link. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[EventHubConsumerGroupInfo]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(EventHubConsumerGroupsListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class EventHubProperties(msrest.serialization.Model): """The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub. Variables are only populated by the server, and will be ignored when sending a request. :param retention_time_in_days: The retention time for device-to-cloud messages in days. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages. :type retention_time_in_days: long :param partition_count: The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide- messaging#device-to-cloud-messages. :type partition_count: int :ivar partition_ids: The partition ids in the Event Hub-compatible endpoint. :vartype partition_ids: list[str] :ivar path: The Event Hub-compatible name. :vartype path: str :ivar endpoint: The Event Hub-compatible endpoint. :vartype endpoint: str """ _validation = { 'partition_ids': {'readonly': True}, 'path': {'readonly': True}, 'endpoint': {'readonly': True}, } _attribute_map = { 'retention_time_in_days': {'key': 'retentionTimeInDays', 'type': 'long'}, 'partition_count': {'key': 'partitionCount', 'type': 'int'}, 'partition_ids': {'key': 'partitionIds', 'type': '[str]'}, 'path': {'key': 'path', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'str'}, } def __init__( self, **kwargs ): super(EventHubProperties, self).__init__(**kwargs) self.retention_time_in_days = kwargs.get('retention_time_in_days', None) self.partition_count = kwargs.get('partition_count', None) self.partition_ids = None self.path = None self.endpoint = None class ExportDevicesRequest(msrest.serialization.Model): """Use to provide parameters when requesting an export of all devices in the IoT hub. All required parameters must be populated in order to send to Azure. :param export_blob_container_uri: Required. The export blob container URI. :type export_blob_container_uri: str :param exclude_keys: Required. The value indicating whether keys should be excluded during export. 
:type exclude_keys: bool """ _validation = { 'export_blob_container_uri': {'required': True}, 'exclude_keys': {'required': True}, } _attribute_map = { 'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'}, 'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'}, } def __init__( self, **kwargs ): super(ExportDevicesRequest, self).__init__(**kwargs) self.export_blob_container_uri = kwargs['export_blob_container_uri'] self.exclude_keys = kwargs['exclude_keys'] class FailoverInput(msrest.serialization.Model): """Use to provide failover region when requesting manual Failover for a hub. All required parameters must be populated in order to send to Azure. :param failover_region: Required. Region the hub will be failed over to. :type failover_region: str """ _validation = { 'failover_region': {'required': True}, } _attribute_map = { 'failover_region': {'key': 'failoverRegion', 'type': 'str'}, } def __init__( self, **kwargs ): super(FailoverInput, self).__init__(**kwargs) self.failover_region = kwargs['failover_region'] class FallbackRouteProperties(msrest.serialization.Model): """The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint. All required parameters must be populated in order to send to Azure. :param name: The name of the route. The name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique. :type name: str :param source: Required. The source to which the routing rule is to be applied to. For example, DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents". :type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource :param condition: The condition which is evaluated in order to apply the fallback route. If the condition is not provided it will evaluate to true by default. For grammar, See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language. :type condition: str :param endpoint_names: Required. The list of endpoints to which the messages that satisfy the condition are routed to. Currently only 1 endpoint is allowed. :type endpoint_names: list[str] :param is_enabled: Required. Used to specify whether the fallback route is enabled. :type is_enabled: bool """ _validation = { 'source': {'required': True}, 'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1}, 'is_enabled': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'source': {'key': 'source', 'type': 'str'}, 'condition': {'key': 'condition', 'type': 'str'}, 'endpoint_names': {'key': 'endpointNames', 'type': '[str]'}, 'is_enabled': {'key': 'isEnabled', 'type': 'bool'}, } def __init__( self, **kwargs ): super(FallbackRouteProperties, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.source = kwargs['source'] self.condition = kwargs.get('condition', None) self.endpoint_names = kwargs['endpoint_names'] self.is_enabled = kwargs['is_enabled'] class FeedbackProperties(msrest.serialization.Model): """The properties of the feedback queue for cloud-to-device messages. :param lock_duration_as_iso8601: The lock duration for the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages. 
:type lock_duration_as_iso8601: ~datetime.timedelta :param ttl_as_iso8601: The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide- messaging#cloud-to-device-messages. :type ttl_as_iso8601: ~datetime.timedelta :param max_delivery_count: The number of times the IoT hub attempts to deliver a message on the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud- to-device-messages. :type max_delivery_count: int """ _validation = { 'max_delivery_count': {'maximum': 100, 'minimum': 1}, } _attribute_map = { 'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'}, 'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'}, 'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'}, } def __init__( self, **kwargs ): super(FeedbackProperties, self).__init__(**kwargs) self.lock_duration_as_iso8601 = kwargs.get('lock_duration_as_iso8601', None) self.ttl_as_iso8601 = kwargs.get('ttl_as_iso8601', None) self.max_delivery_count = kwargs.get('max_delivery_count', None) class ImportDevicesRequest(msrest.serialization.Model): """Use to provide parameters when requesting an import of all devices in the hub. All required parameters must be populated in order to send to Azure. :param input_blob_container_uri: Required. The input blob container URI. :type input_blob_container_uri: str :param output_blob_container_uri: Required. The output blob container URI. :type output_blob_container_uri: str """ _validation = { 'input_blob_container_uri': {'required': True}, 'output_blob_container_uri': {'required': True}, } _attribute_map = { 'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'}, 'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'}, } def __init__( self, **kwargs ): super(ImportDevicesRequest, self).__init__(**kwargs) self.input_blob_container_uri = kwargs['input_blob_container_uri'] self.output_blob_container_uri = kwargs['output_blob_container_uri'] class IotHubCapacity(msrest.serialization.Model): """IoT Hub capacity information. Variables are only populated by the server, and will be ignored when sending a request. :ivar minimum: The minimum number of units. :vartype minimum: long :ivar maximum: The maximum number of units. :vartype maximum: long :ivar default: The default number of units. :vartype default: long :ivar scale_type: The type of the scaling enabled. Possible values include: "Automatic", "Manual", "None". :vartype scale_type: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubScaleType """ _validation = { 'minimum': {'readonly': True, 'maximum': 1, 'minimum': 1}, 'maximum': {'readonly': True}, 'default': {'readonly': True}, 'scale_type': {'readonly': True}, } _attribute_map = { 'minimum': {'key': 'minimum', 'type': 'long'}, 'maximum': {'key': 'maximum', 'type': 'long'}, 'default': {'key': 'default', 'type': 'long'}, 'scale_type': {'key': 'scaleType', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHubCapacity, self).__init__(**kwargs) self.minimum = None self.maximum = None self.default = None self.scale_type = None class Resource(msrest.serialization.Model): """The common properties of an Azure resource. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource identifier. :vartype id: str :ivar name: The resource name. 
:vartype name: str :ivar type: The resource type. :vartype type: str :param location: Required. The resource location. :type location: str :param tags: A set of tags. The resource tags. :type tags: dict[str, str] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'}, 'type': {'readonly': True}, 'location': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type = None self.location = kwargs['location'] self.tags = kwargs.get('tags', None) class IotHubDescription(Resource): """The description of the IoT hub. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource identifier. :vartype id: str :ivar name: The resource name. :vartype name: str :ivar type: The resource type. :vartype type: str :param location: Required. The resource location. :type location: str :param tags: A set of tags. The resource tags. :type tags: dict[str, str] :param etag: The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention. :type etag: str :param properties: IotHub properties. :type properties: ~azure.mgmt.iothub.v2019_11_04.models.IotHubProperties :param sku: Required. IotHub SKU info. :type sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'}, 'type': {'readonly': True}, 'location': {'required': True}, 'sku': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'IotHubProperties'}, 'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'}, } def __init__( self, **kwargs ): super(IotHubDescription, self).__init__(**kwargs) self.etag = kwargs.get('etag', None) self.properties = kwargs.get('properties', None) self.sku = kwargs['sku'] class IotHubDescriptionListResult(msrest.serialization.Model): """The JSON-serialized array of IotHubDescription objects with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: The array of IotHubDescription objects. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubDescription] :ivar next_link: The next link. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[IotHubDescription]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHubDescriptionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class IotHubLocationDescription(msrest.serialization.Model): """Public representation of one of the locations where a resource is provisioned. :param location: The name of the Azure region. 
:type location: str :param role: The role of the region, can be either primary or secondary. The primary region is where the IoT hub is currently provisioned. The secondary region is the Azure disaster recovery (DR) paired region and also the region where the IoT hub can failover to. Possible values include: "primary", "secondary". :type role: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubReplicaRoleType """ _attribute_map = { 'location': {'key': 'location', 'type': 'str'}, 'role': {'key': 'role', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHubLocationDescription, self).__init__(**kwargs) self.location = kwargs.get('location', None) self.role = kwargs.get('role', None) class IotHubNameAvailabilityInfo(msrest.serialization.Model): """The properties indicating whether a given IoT hub name is available. Variables are only populated by the server, and will be ignored when sending a request. :ivar name_available: The value which indicates whether the provided name is available. :vartype name_available: bool :ivar reason: The reason for unavailability. Possible values include: "Invalid", "AlreadyExists". :vartype reason: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubNameUnavailabilityReason :param message: The detailed reason message. :type message: str """ _validation = { 'name_available': {'readonly': True}, 'reason': {'readonly': True}, } _attribute_map = { 'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHubNameAvailabilityInfo, self).__init__(**kwargs) self.name_available = None self.reason = None self.message = kwargs.get('message', None) class IotHubProperties(msrest.serialization.Model): """The properties of an IoT hub. Variables are only populated by the server, and will be ignored when sending a request. :param authorization_policies: The shared access policies you can use to secure a connection to the IoT hub. :type authorization_policies: list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule] :param ip_filter_rules: The IP filter rules. :type ip_filter_rules: list[~azure.mgmt.iothub.v2019_11_04.models.IpFilterRule] :ivar provisioning_state: The provisioning state. :vartype provisioning_state: str :ivar state: The hub state. :vartype state: str :ivar host_name: The name of the host. :vartype host_name: str :param event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible keys to this dictionary is events. This key has to be present in the dictionary while making create or update calls for the IoT hub. :type event_hub_endpoints: dict[str, ~azure.mgmt.iothub.v2019_11_04.models.EventHubProperties] :param routing: The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging. :type routing: ~azure.mgmt.iothub.v2019_11_04.models.RoutingProperties :param storage_endpoints: The list of Azure Storage endpoints where you can upload files. Currently you can configure only one Azure Storage account and that MUST have its key as $default. Specifying more than one storage account causes an error to be thrown. Not specifying a value for this property when the enableFileUploadNotifications property is set to True, causes an error to be thrown. 
:type storage_endpoints: dict[str, ~azure.mgmt.iothub.v2019_11_04.models.StorageEndpointProperties] :param messaging_endpoints: The messaging endpoint properties for the file upload notification queue. :type messaging_endpoints: dict[str, ~azure.mgmt.iothub.v2019_11_04.models.MessagingEndpointProperties] :param enable_file_upload_notifications: If True, file upload notifications are enabled. :type enable_file_upload_notifications: bool :param cloud_to_device: The IoT hub cloud-to-device messaging properties. :type cloud_to_device: ~azure.mgmt.iothub.v2019_11_04.models.CloudToDeviceProperties :param comments: IoT hub comments. :type comments: str :param features: The capabilities and features enabled for the IoT hub. Possible values include: "None", "DeviceManagement". :type features: str or ~azure.mgmt.iothub.v2019_11_04.models.Capabilities :ivar locations: Primary and secondary location for iot hub. :vartype locations: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubLocationDescription] """ _validation = { 'provisioning_state': {'readonly': True}, 'state': {'readonly': True}, 'host_name': {'readonly': True}, 'locations': {'readonly': True}, } _attribute_map = { 'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRule]'}, 'ip_filter_rules': {'key': 'ipFilterRules', 'type': '[IpFilterRule]'}, 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, 'state': {'key': 'state', 'type': 'str'}, 'host_name': {'key': 'hostName', 'type': 'str'}, 'event_hub_endpoints': {'key': 'eventHubEndpoints', 'type': '{EventHubProperties}'}, 'routing': {'key': 'routing', 'type': 'RoutingProperties'}, 'storage_endpoints': {'key': 'storageEndpoints', 'type': '{StorageEndpointProperties}'}, 'messaging_endpoints': {'key': 'messagingEndpoints', 'type': '{MessagingEndpointProperties}'}, 'enable_file_upload_notifications': {'key': 'enableFileUploadNotifications', 'type': 'bool'}, 'cloud_to_device': {'key': 'cloudToDevice', 'type': 'CloudToDeviceProperties'}, 'comments': {'key': 'comments', 'type': 'str'}, 'features': {'key': 'features', 'type': 'str'}, 'locations': {'key': 'locations', 'type': '[IotHubLocationDescription]'}, } def __init__( self, **kwargs ): super(IotHubProperties, self).__init__(**kwargs) self.authorization_policies = kwargs.get('authorization_policies', None) self.ip_filter_rules = kwargs.get('ip_filter_rules', None) self.provisioning_state = None self.state = None self.host_name = None self.event_hub_endpoints = kwargs.get('event_hub_endpoints', None) self.routing = kwargs.get('routing', None) self.storage_endpoints = kwargs.get('storage_endpoints', None) self.messaging_endpoints = kwargs.get('messaging_endpoints', None) self.enable_file_upload_notifications = kwargs.get('enable_file_upload_notifications', None) self.cloud_to_device = kwargs.get('cloud_to_device', None) self.comments = kwargs.get('comments', None) self.features = kwargs.get('features', None) self.locations = None class IotHubQuotaMetricInfo(msrest.serialization.Model): """Quota metrics properties. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name of the quota metric. :vartype name: str :ivar current_value: The current value for the quota metric. :vartype current_value: long :ivar max_value: The maximum value of the quota metric. 
:vartype max_value: long """ _validation = { 'name': {'readonly': True}, 'current_value': {'readonly': True}, 'max_value': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'current_value': {'key': 'currentValue', 'type': 'long'}, 'max_value': {'key': 'maxValue', 'type': 'long'}, } def __init__( self, **kwargs ): super(IotHubQuotaMetricInfo, self).__init__(**kwargs) self.name = None self.current_value = None self.max_value = None class IotHubQuotaMetricInfoListResult(msrest.serialization.Model): """The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: The array of quota metrics objects. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubQuotaMetricInfo] :ivar next_link: The next link. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[IotHubQuotaMetricInfo]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHubQuotaMetricInfoListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class IotHubSkuDescription(msrest.serialization.Model): """SKU properties. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar resource_type: The type of the resource. :vartype resource_type: str :param sku: Required. The type of the resource. :type sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo :param capacity: Required. IotHub capacity. :type capacity: ~azure.mgmt.iothub.v2019_11_04.models.IotHubCapacity """ _validation = { 'resource_type': {'readonly': True}, 'sku': {'required': True}, 'capacity': {'required': True}, } _attribute_map = { 'resource_type': {'key': 'resourceType', 'type': 'str'}, 'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'}, 'capacity': {'key': 'capacity', 'type': 'IotHubCapacity'}, } def __init__( self, **kwargs ): super(IotHubSkuDescription, self).__init__(**kwargs) self.resource_type = None self.sku = kwargs['sku'] self.capacity = kwargs['capacity'] class IotHubSkuDescriptionListResult(msrest.serialization.Model): """The JSON-serialized array of IotHubSkuDescription objects with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: The array of IotHubSkuDescription. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuDescription] :ivar next_link: The next link. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[IotHubSkuDescription]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHubSkuDescriptionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class IotHubSkuInfo(msrest.serialization.Model): """Information about the SKU of the IoT hub. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3", "B1", "B2", "B3". :type name: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSku :ivar tier: The billing tier for the IoT hub. Possible values include: "Free", "Standard", "Basic". 
:vartype tier: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuTier :param capacity: The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits. :type capacity: long """ _validation = { 'name': {'required': True}, 'tier': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'}, 'capacity': {'key': 'capacity', 'type': 'long'}, } def __init__( self, **kwargs ): super(IotHubSkuInfo, self).__init__(**kwargs) self.name = kwargs['name'] self.tier = None self.capacity = kwargs.get('capacity', None) class IpFilterRule(msrest.serialization.Model): """The IP filter rules for the IoT hub. All required parameters must be populated in order to send to Azure. :param filter_name: Required. The name of the IP filter rule. :type filter_name: str :param action: Required. The desired action for requests captured by this rule. Possible values include: "Accept", "Reject". :type action: str or ~azure.mgmt.iothub.v2019_11_04.models.IpFilterActionType :param ip_mask: Required. A string that contains the IP address range in CIDR notation for the rule. :type ip_mask: str """ _validation = { 'filter_name': {'required': True}, 'action': {'required': True}, 'ip_mask': {'required': True}, } _attribute_map = { 'filter_name': {'key': 'filterName', 'type': 'str'}, 'action': {'key': 'action', 'type': 'str'}, 'ip_mask': {'key': 'ipMask', 'type': 'str'}, } def __init__( self, **kwargs ): super(IpFilterRule, self).__init__(**kwargs) self.filter_name = kwargs['filter_name'] self.action = kwargs['action'] self.ip_mask = kwargs['ip_mask'] class JobResponse(msrest.serialization.Model): """The properties of the Job Response object. Variables are only populated by the server, and will be ignored when sending a request. :ivar job_id: The job identifier. :vartype job_id: str :ivar start_time_utc: The start time of the job. :vartype start_time_utc: ~datetime.datetime :ivar end_time_utc: The time the job stopped processing. :vartype end_time_utc: ~datetime.datetime :ivar type: The type of the job. Possible values include: "unknown", "export", "import", "backup", "readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration", "rebootDevice", "factoryResetDevice", "firmwareUpdate". :vartype type: str or ~azure.mgmt.iothub.v2019_11_04.models.JobType :ivar status: The status of the job. Possible values include: "unknown", "enqueued", "running", "completed", "failed", "cancelled". :vartype status: str or ~azure.mgmt.iothub.v2019_11_04.models.JobStatus :ivar failure_reason: If status == failed, this string containing the reason for the failure. :vartype failure_reason: str :ivar status_message: The status message for the job. :vartype status_message: str :ivar parent_job_id: The job identifier of the parent job, if any. 
:vartype parent_job_id: str """ _validation = { 'job_id': {'readonly': True}, 'start_time_utc': {'readonly': True}, 'end_time_utc': {'readonly': True}, 'type': {'readonly': True}, 'status': {'readonly': True}, 'failure_reason': {'readonly': True}, 'status_message': {'readonly': True}, 'parent_job_id': {'readonly': True}, } _attribute_map = { 'job_id': {'key': 'jobId', 'type': 'str'}, 'start_time_utc': {'key': 'startTimeUtc', 'type': 'rfc-1123'}, 'end_time_utc': {'key': 'endTimeUtc', 'type': 'rfc-1123'}, 'type': {'key': 'type', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'failure_reason': {'key': 'failureReason', 'type': 'str'}, 'status_message': {'key': 'statusMessage', 'type': 'str'}, 'parent_job_id': {'key': 'parentJobId', 'type': 'str'}, } def __init__( self, **kwargs ): super(JobResponse, self).__init__(**kwargs) self.job_id = None self.start_time_utc = None self.end_time_utc = None self.type = None self.status = None self.failure_reason = None self.status_message = None self.parent_job_id = None class JobResponseListResult(msrest.serialization.Model): """The JSON-serialized array of JobResponse objects with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: The array of JobResponse objects. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.JobResponse] :ivar next_link: The next link. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[JobResponse]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(JobResponseListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class MatchedRoute(msrest.serialization.Model): """Routes that matched. :param properties: Properties of routes that matched. :type properties: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties """ _attribute_map = { 'properties': {'key': 'properties', 'type': 'RouteProperties'}, } def __init__( self, **kwargs ): super(MatchedRoute, self).__init__(**kwargs) self.properties = kwargs.get('properties', None) class MessagingEndpointProperties(msrest.serialization.Model): """The properties of the messaging endpoints used by this IoT hub. :param lock_duration_as_iso8601: The lock duration. See: https://docs.microsoft.com/azure/iot- hub/iot-hub-devguide-file-upload. :type lock_duration_as_iso8601: ~datetime.timedelta :param ttl_as_iso8601: The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file- upload. :type ttl_as_iso8601: ~datetime.timedelta :param max_delivery_count: The number of times the IoT hub attempts to deliver a message. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload. 
:type max_delivery_count: int """ _validation = { 'max_delivery_count': {'maximum': 100, 'minimum': 1}, } _attribute_map = { 'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'}, 'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'}, 'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'}, } def __init__( self, **kwargs ): super(MessagingEndpointProperties, self).__init__(**kwargs) self.lock_duration_as_iso8601 = kwargs.get('lock_duration_as_iso8601', None) self.ttl_as_iso8601 = kwargs.get('ttl_as_iso8601', None) self.max_delivery_count = kwargs.get('max_delivery_count', None) class Name(msrest.serialization.Model): """Name of Iot Hub type. :param value: IotHub type. :type value: str :param localized_value: Localized value of name. :type localized_value: str """ _attribute_map = { 'value': {'key': 'value', 'type': 'str'}, 'localized_value': {'key': 'localizedValue', 'type': 'str'}, } def __init__( self, **kwargs ): super(Name, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.localized_value = kwargs.get('localized_value', None) class Operation(msrest.serialization.Model): """IoT Hub REST API operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}. :vartype name: str :param display: The object that represents the operation. :type display: ~azure.mgmt.iothub.v2019_11_04.models.OperationDisplay """ _validation = { 'name': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__( self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = None self.display = kwargs.get('display', None) class OperationDisplay(msrest.serialization.Model): """The object that represents the operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar provider: Service provider: Microsoft Devices. :vartype provider: str :ivar resource: Resource Type: IotHubs. :vartype resource: str :ivar operation: Name of the operation. :vartype operation: str :ivar description: Description of the operation. :vartype description: str """ _validation = { 'provider': {'readonly': True}, 'resource': {'readonly': True}, 'operation': {'readonly': True}, 'description': {'readonly': True}, } _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource = None self.operation = None self.description = None class OperationInputs(msrest.serialization.Model): """Input values. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the IoT hub to check. :type name: str """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationInputs, self).__init__(**kwargs) self.name = kwargs['name'] class OperationListResult(msrest.serialization.Model): """Result of the request to list IoT Hub operations. It contains a list of operations and a URL link to get the next set of results. Variables are only populated by the server, and will be ignored when sending a request. 
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider. :vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.Operation] :ivar next_link: URL to get the next set of operation list results if there are any. :vartype next_link: str """ _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link = None class RegistryStatistics(msrest.serialization.Model): """Identity registry statistics. Variables are only populated by the server, and will be ignored when sending a request. :ivar total_device_count: The total count of devices in the identity registry. :vartype total_device_count: long :ivar enabled_device_count: The count of enabled devices in the identity registry. :vartype enabled_device_count: long :ivar disabled_device_count: The count of disabled devices in the identity registry. :vartype disabled_device_count: long """ _validation = { 'total_device_count': {'readonly': True}, 'enabled_device_count': {'readonly': True}, 'disabled_device_count': {'readonly': True}, } _attribute_map = { 'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'}, 'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'}, 'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'}, } def __init__( self, **kwargs ): super(RegistryStatistics, self).__init__(**kwargs) self.total_device_count = None self.enabled_device_count = None self.disabled_device_count = None class RouteCompilationError(msrest.serialization.Model): """Compilation error when evaluating route. :param message: Route error message. :type message: str :param severity: Severity of the route error. Possible values include: "error", "warning". :type severity: str or ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorSeverity :param location: Location where the route error happened. :type location: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorRange """ _attribute_map = { 'message': {'key': 'message', 'type': 'str'}, 'severity': {'key': 'severity', 'type': 'str'}, 'location': {'key': 'location', 'type': 'RouteErrorRange'}, } def __init__( self, **kwargs ): super(RouteCompilationError, self).__init__(**kwargs) self.message = kwargs.get('message', None) self.severity = kwargs.get('severity', None) self.location = kwargs.get('location', None) class RouteErrorPosition(msrest.serialization.Model): """Position where the route error happened. :param line: Line where the route error happened. :type line: int :param column: Column where the route error happened. :type column: int """ _attribute_map = { 'line': {'key': 'line', 'type': 'int'}, 'column': {'key': 'column', 'type': 'int'}, } def __init__( self, **kwargs ): super(RouteErrorPosition, self).__init__(**kwargs) self.line = kwargs.get('line', None) self.column = kwargs.get('column', None) class RouteErrorRange(msrest.serialization.Model): """Range of route errors. :param start: Start where the route error happened. :type start: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition :param end: End where the route error happened. 
:type end: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition """ _attribute_map = { 'start': {'key': 'start', 'type': 'RouteErrorPosition'}, 'end': {'key': 'end', 'type': 'RouteErrorPosition'}, } def __init__( self, **kwargs ): super(RouteErrorRange, self).__init__(**kwargs) self.start = kwargs.get('start', None) self.end = kwargs.get('end', None) class RouteProperties(msrest.serialization.Model): """The properties of a routing rule that your IoT hub uses to route messages to endpoints. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the route. The name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique. :type name: str :param source: Required. The source that the routing rule is to be applied to, such as DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents". :type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource :param condition: The condition that is evaluated to apply the routing rule. If no condition is provided, it evaluates to true by default. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language. :type condition: str :param endpoint_names: Required. The list of endpoints to which messages that satisfy the condition are routed. Currently only one endpoint is allowed. :type endpoint_names: list[str] :param is_enabled: Required. Used to specify whether a route is enabled. :type is_enabled: bool """ _validation = { 'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'}, 'source': {'required': True}, 'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1}, 'is_enabled': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'source': {'key': 'source', 'type': 'str'}, 'condition': {'key': 'condition', 'type': 'str'}, 'endpoint_names': {'key': 'endpointNames', 'type': '[str]'}, 'is_enabled': {'key': 'isEnabled', 'type': 'bool'}, } def __init__( self, **kwargs ): super(RouteProperties, self).__init__(**kwargs) self.name = kwargs['name'] self.source = kwargs['source'] self.condition = kwargs.get('condition', None) self.endpoint_names = kwargs['endpoint_names'] self.is_enabled = kwargs['is_enabled'] class RoutingEndpoints(msrest.serialization.Model): """The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs. :param service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the messages to, based on the routing rules. :type service_bus_queues: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusQueueEndpointProperties] :param service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the messages to, based on the routing rules. :type service_bus_topics: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusTopicEndpointProperties] :param event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on the routing rules. This list does not include the built-in Event Hubs endpoint. 
:type event_hubs: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingEventHubProperties] :param storage_containers: The list of storage container endpoints that IoT hub routes messages to, based on the routing rules. :type storage_containers: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerProperties] """ _attribute_map = { 'service_bus_queues': {'key': 'serviceBusQueues', 'type': '[RoutingServiceBusQueueEndpointProperties]'}, 'service_bus_topics': {'key': 'serviceBusTopics', 'type': '[RoutingServiceBusTopicEndpointProperties]'}, 'event_hubs': {'key': 'eventHubs', 'type': '[RoutingEventHubProperties]'}, 'storage_containers': {'key': 'storageContainers', 'type': '[RoutingStorageContainerProperties]'}, } def __init__( self, **kwargs ): super(RoutingEndpoints, self).__init__(**kwargs) self.service_bus_queues = kwargs.get('service_bus_queues', None) self.service_bus_topics = kwargs.get('service_bus_topics', None) self.event_hubs = kwargs.get('event_hubs', None) self.storage_containers = kwargs.get('storage_containers', None) class RoutingEventHubProperties(msrest.serialization.Model): """The properties related to an event hub endpoint. All required parameters must be populated in order to send to Azure. :param connection_string: Required. The connection string of the event hub endpoint. :type connection_string: str :param name: Required. The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. :type name: str :param subscription_id: The subscription identifier of the event hub endpoint. :type subscription_id: str :param resource_group: The name of the resource group of the event hub endpoint. :type resource_group: str """ _validation = { 'connection_string': {'required': True}, 'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'}, } _attribute_map = { 'connection_string': {'key': 'connectionString', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'subscription_id': {'key': 'subscriptionId', 'type': 'str'}, 'resource_group': {'key': 'resourceGroup', 'type': 'str'}, } def __init__( self, **kwargs ): super(RoutingEventHubProperties, self).__init__(**kwargs) self.connection_string = kwargs['connection_string'] self.name = kwargs['name'] self.subscription_id = kwargs.get('subscription_id', None) self.resource_group = kwargs.get('resource_group', None) class RoutingMessage(msrest.serialization.Model): """Routing message. :param body: Body of routing message. :type body: str :param app_properties: App properties. :type app_properties: dict[str, str] :param system_properties: System properties. :type system_properties: dict[str, str] """ _attribute_map = { 'body': {'key': 'body', 'type': 'str'}, 'app_properties': {'key': 'appProperties', 'type': '{str}'}, 'system_properties': {'key': 'systemProperties', 'type': '{str}'}, } def __init__( self, **kwargs ): super(RoutingMessage, self).__init__(**kwargs) self.body = kwargs.get('body', None) self.app_properties = kwargs.get('app_properties', None) self.system_properties = kwargs.get('system_properties', None) class RoutingProperties(msrest.serialization.Model): """The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging. 
:param endpoints: The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs. :type endpoints: ~azure.mgmt.iothub.v2019_11_04.models.RoutingEndpoints :param routes: The list of user-provided routing rules that the IoT hub uses to route messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and a maximum of 5 routing rules are allowed for free hubs. :type routes: list[~azure.mgmt.iothub.v2019_11_04.models.RouteProperties] :param fallback_route: The properties of the route that is used as a fall-back route when none of the conditions specified in the 'routes' section are met. This is an optional parameter. When this property is not set, the messages which do not meet any of the conditions specified in the 'routes' section get routed to the built-in eventhub endpoint. :type fallback_route: ~azure.mgmt.iothub.v2019_11_04.models.FallbackRouteProperties :param enrichments: The list of user-provided enrichments that the IoT hub applies to messages to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid. :type enrichments: list[~azure.mgmt.iothub.v2019_11_04.models.EnrichmentProperties] """ _attribute_map = { 'endpoints': {'key': 'endpoints', 'type': 'RoutingEndpoints'}, 'routes': {'key': 'routes', 'type': '[RouteProperties]'}, 'fallback_route': {'key': 'fallbackRoute', 'type': 'FallbackRouteProperties'}, 'enrichments': {'key': 'enrichments', 'type': '[EnrichmentProperties]'}, } def __init__( self, **kwargs ): super(RoutingProperties, self).__init__(**kwargs) self.endpoints = kwargs.get('endpoints', None) self.routes = kwargs.get('routes', None) self.fallback_route = kwargs.get('fallback_route', None) self.enrichments = kwargs.get('enrichments', None) class RoutingServiceBusQueueEndpointProperties(msrest.serialization.Model): """The properties related to service bus queue endpoint types. All required parameters must be populated in order to send to Azure. :param connection_string: Required. The connection string of the service bus queue endpoint. :type connection_string: str :param name: Required. The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual queue name. :type name: str :param subscription_id: The subscription identifier of the service bus queue endpoint. :type subscription_id: str :param resource_group: The name of the resource group of the service bus queue endpoint. 
:type resource_group: str """ _validation = { 'connection_string': {'required': True}, 'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'}, } _attribute_map = { 'connection_string': {'key': 'connectionString', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'subscription_id': {'key': 'subscriptionId', 'type': 'str'}, 'resource_group': {'key': 'resourceGroup', 'type': 'str'}, } def __init__( self, **kwargs ): super(RoutingServiceBusQueueEndpointProperties, self).__init__(**kwargs) self.connection_string = kwargs['connection_string'] self.name = kwargs['name'] self.subscription_id = kwargs.get('subscription_id', None) self.resource_group = kwargs.get('resource_group', None) class RoutingServiceBusTopicEndpointProperties(msrest.serialization.Model): """The properties related to service bus topic endpoint types. All required parameters must be populated in order to send to Azure. :param connection_string: Required. The connection string of the service bus topic endpoint. :type connection_string: str :param name: Required. The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual topic name. :type name: str :param subscription_id: The subscription identifier of the service bus topic endpoint. :type subscription_id: str :param resource_group: The name of the resource group of the service bus topic endpoint. :type resource_group: str """ _validation = { 'connection_string': {'required': True}, 'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'}, } _attribute_map = { 'connection_string': {'key': 'connectionString', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'subscription_id': {'key': 'subscriptionId', 'type': 'str'}, 'resource_group': {'key': 'resourceGroup', 'type': 'str'}, } def __init__( self, **kwargs ): super(RoutingServiceBusTopicEndpointProperties, self).__init__(**kwargs) self.connection_string = kwargs['connection_string'] self.name = kwargs['name'] self.subscription_id = kwargs.get('subscription_id', None) self.resource_group = kwargs.get('resource_group', None) class RoutingStorageContainerProperties(msrest.serialization.Model): """The properties related to a storage container endpoint. All required parameters must be populated in order to send to Azure. :param connection_string: Required. The connection string of the storage account. :type connection_string: str :param name: Required. The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. :type name: str :param subscription_id: The subscription identifier of the storage account. :type subscription_id: str :param resource_group: The name of the resource group of the storage account. :type resource_group: str :param container_name: Required. The name of storage container in the storage account. :type container_name: str :param file_name_format: File name format for the blob. Default format is {iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be reordered. 
:type file_name_format: str :param batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds. :type batch_frequency_in_seconds: int :param max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB). :type max_chunk_size_in_bytes: int :param encoding: Encoding that is used to serialize messages to blobs. Supported values are 'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro", "AvroDeflate", "JSON". :type encoding: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerPropertiesEncoding """ _validation = { 'connection_string': {'required': True}, 'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'}, 'container_name': {'required': True}, 'batch_frequency_in_seconds': {'maximum': 720, 'minimum': 60}, 'max_chunk_size_in_bytes': {'maximum': 524288000, 'minimum': 10485760}, } _attribute_map = { 'connection_string': {'key': 'connectionString', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'subscription_id': {'key': 'subscriptionId', 'type': 'str'}, 'resource_group': {'key': 'resourceGroup', 'type': 'str'}, 'container_name': {'key': 'containerName', 'type': 'str'}, 'file_name_format': {'key': 'fileNameFormat', 'type': 'str'}, 'batch_frequency_in_seconds': {'key': 'batchFrequencyInSeconds', 'type': 'int'}, 'max_chunk_size_in_bytes': {'key': 'maxChunkSizeInBytes', 'type': 'int'}, 'encoding': {'key': 'encoding', 'type': 'str'}, } def __init__( self, **kwargs ): super(RoutingStorageContainerProperties, self).__init__(**kwargs) self.connection_string = kwargs['connection_string'] self.name = kwargs['name'] self.subscription_id = kwargs.get('subscription_id', None) self.resource_group = kwargs.get('resource_group', None) self.container_name = kwargs['container_name'] self.file_name_format = kwargs.get('file_name_format', None) self.batch_frequency_in_seconds = kwargs.get('batch_frequency_in_seconds', None) self.max_chunk_size_in_bytes = kwargs.get('max_chunk_size_in_bytes', None) self.encoding = kwargs.get('encoding', None) class RoutingTwin(msrest.serialization.Model): """Twin reference input parameter. This is an optional parameter. :param tags: A set of tags. Twin Tags. :type tags: object :param properties: :type properties: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwinProperties """ _attribute_map = { 'tags': {'key': 'tags', 'type': 'object'}, 'properties': {'key': 'properties', 'type': 'RoutingTwinProperties'}, } def __init__( self, **kwargs ): super(RoutingTwin, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.properties = kwargs.get('properties', None) class RoutingTwinProperties(msrest.serialization.Model): """RoutingTwinProperties. :param desired: Twin desired properties. :type desired: object :param reported: Twin desired properties. :type reported: object """ _attribute_map = { 'desired': {'key': 'desired', 'type': 'object'}, 'reported': {'key': 'reported', 'type': 'object'}, } def __init__( self, **kwargs ): super(RoutingTwinProperties, self).__init__(**kwargs) self.desired = kwargs.get('desired', None) self.reported = kwargs.get('reported', None) class SharedAccessSignatureAuthorizationRule(msrest.serialization.Model): """The properties of an IoT hub shared access policy. All required parameters must be populated in order to send to Azure. :param key_name: Required. 
The name of the shared access policy. :type key_name: str :param primary_key: The primary key. :type primary_key: str :param secondary_key: The secondary key. :type secondary_key: str :param rights: Required. The permissions assigned to the shared access policy. Possible values include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead, RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite, ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect", "RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect". :type rights: str or ~azure.mgmt.iothub.v2019_11_04.models.AccessRights """ _validation = { 'key_name': {'required': True}, 'rights': {'required': True}, } _attribute_map = { 'key_name': {'key': 'keyName', 'type': 'str'}, 'primary_key': {'key': 'primaryKey', 'type': 'str'}, 'secondary_key': {'key': 'secondaryKey', 'type': 'str'}, 'rights': {'key': 'rights', 'type': 'str'}, } def __init__( self, **kwargs ): super(SharedAccessSignatureAuthorizationRule, self).__init__(**kwargs) self.key_name = kwargs['key_name'] self.primary_key = kwargs.get('primary_key', None) self.secondary_key = kwargs.get('secondary_key', None) self.rights = kwargs['rights'] class SharedAccessSignatureAuthorizationRuleListResult(msrest.serialization.Model): """The list of shared access policies with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: The list of shared access policies. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule] :ivar next_link: The next link. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[SharedAccessSignatureAuthorizationRule]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(SharedAccessSignatureAuthorizationRuleListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class StorageEndpointProperties(msrest.serialization.Model): """The properties of the Azure Storage endpoint for file upload. All required parameters must be populated in order to send to Azure. :param sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for file upload is valid. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file- upload#file-upload-notification-configuration-options. :type sas_ttl_as_iso8601: ~datetime.timedelta :param connection_string: Required. The connection string for the Azure Storage account to which files are uploaded. :type connection_string: str :param container_name: Required. The name of the root container where you upload files. The container need not exist but should be creatable using the connectionString specified. 
:type container_name: str """ _validation = { 'connection_string': {'required': True}, 'container_name': {'required': True}, } _attribute_map = { 'sas_ttl_as_iso8601': {'key': 'sasTtlAsIso8601', 'type': 'duration'}, 'connection_string': {'key': 'connectionString', 'type': 'str'}, 'container_name': {'key': 'containerName', 'type': 'str'}, } def __init__( self, **kwargs ): super(StorageEndpointProperties, self).__init__(**kwargs) self.sas_ttl_as_iso8601 = kwargs.get('sas_ttl_as_iso8601', None) self.connection_string = kwargs['connection_string'] self.container_name = kwargs['container_name'] class TagsResource(msrest.serialization.Model): """A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance. :param tags: A set of tags. Resource tags. :type tags: dict[str, str] """ _attribute_map = { 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, **kwargs ): super(TagsResource, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) class TestAllRoutesInput(msrest.serialization.Model): """Input for testing all routes. :param routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents". :type routing_source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource :param message: Routing message. :type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage :param twin: Routing Twin Reference. :type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin """ _attribute_map = { 'routing_source': {'key': 'routingSource', 'type': 'str'}, 'message': {'key': 'message', 'type': 'RoutingMessage'}, 'twin': {'key': 'twin', 'type': 'RoutingTwin'}, } def __init__( self, **kwargs ): super(TestAllRoutesInput, self).__init__(**kwargs) self.routing_source = kwargs.get('routing_source', None) self.message = kwargs.get('message', None) self.twin = kwargs.get('twin', None) class TestAllRoutesResult(msrest.serialization.Model): """Result of testing all routes. :param routes: JSON-serialized array of matched routes. :type routes: list[~azure.mgmt.iothub.v2019_11_04.models.MatchedRoute] """ _attribute_map = { 'routes': {'key': 'routes', 'type': '[MatchedRoute]'}, } def __init__( self, **kwargs ): super(TestAllRoutesResult, self).__init__(**kwargs) self.routes = kwargs.get('routes', None) class TestRouteInput(msrest.serialization.Model): """Input for testing route. All required parameters must be populated in order to send to Azure. :param message: Routing message. :type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage :param route: Required. Route properties. :type route: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties :param twin: Routing Twin Reference. :type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin """ _validation = { 'route': {'required': True}, } _attribute_map = { 'message': {'key': 'message', 'type': 'RoutingMessage'}, 'route': {'key': 'route', 'type': 'RouteProperties'}, 'twin': {'key': 'twin', 'type': 'RoutingTwin'}, } def __init__( self, **kwargs ): super(TestRouteInput, self).__init__(**kwargs) self.message = kwargs.get('message', None) self.route = kwargs['route'] self.twin = kwargs.get('twin', None) class TestRouteResult(msrest.serialization.Model): """Result of testing one route. :param result: Result of testing route. Possible values include: "undefined", "false", "true". :type result: str or ~azure.mgmt.iothub.v2019_11_04.models.TestResultStatus :param details: Detailed result of testing route. 
:type details: ~azure.mgmt.iothub.v2019_11_04.models.TestRouteResultDetails """ _attribute_map = { 'result': {'key': 'result', 'type': 'str'}, 'details': {'key': 'details', 'type': 'TestRouteResultDetails'}, } def __init__( self, **kwargs ): super(TestRouteResult, self).__init__(**kwargs) self.result = kwargs.get('result', None) self.details = kwargs.get('details', None) class TestRouteResultDetails(msrest.serialization.Model): """Detailed result of testing a route. :param compilation_errors: JSON-serialized list of route compilation errors. :type compilation_errors: list[~azure.mgmt.iothub.v2019_11_04.models.RouteCompilationError] """ _attribute_map = { 'compilation_errors': {'key': 'compilationErrors', 'type': '[RouteCompilationError]'}, } def __init__( self, **kwargs ): super(TestRouteResultDetails, self).__init__(**kwargs) self.compilation_errors = kwargs.get('compilation_errors', None) class UserSubscriptionQuota(msrest.serialization.Model): """User subscription quota response. :param id: IotHub type id. :type id: str :param type: Response type. :type type: str :param unit: Unit of IotHub type. :type unit: str :param current_value: Current number of IotHub type. :type current_value: int :param limit: Numerical limit on IotHub type. :type limit: int :param name: IotHub type. :type name: ~azure.mgmt.iothub.v2019_11_04.models.Name """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_value': {'key': 'currentValue', 'type': 'int'}, 'limit': {'key': 'limit', 'type': 'int'}, 'name': {'key': 'name', 'type': 'Name'}, } def __init__( self, **kwargs ): super(UserSubscriptionQuota, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.type = kwargs.get('type', None) self.unit = kwargs.get('unit', None) self.current_value = kwargs.get('current_value', None) self.limit = kwargs.get('limit', None) self.name = kwargs.get('name', None) class UserSubscriptionQuotaListResult(msrest.serialization.Model): """Json-serialized array of User subscription quota response. Variables are only populated by the server, and will be ignored when sending a request. :param value: :type value: list[~azure.mgmt.iothub.v2019_11_04.models.UserSubscriptionQuota] :ivar next_link: :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[UserSubscriptionQuota]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(UserSubscriptionQuotaListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None
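The model classes in the file above all follow the same msrest kwargs pattern: required fields are read with kwargs['...'] (so omitting them raises KeyError), optional fields use kwargs.get(..., None), and server-populated :ivar fields are forced to None on the client. A minimal sketch of that pattern, assuming the azure-mgmt-iothub package that ships these v2019_11_04 models (and its msrest dependency) is installed; the SKU, region, and route values below are illustrative placeholders, not values taken from this file.

# Minimal sketch: constructing a few of the models defined above via the
# kwargs pattern shown in their __init__ methods. Assumes azure-mgmt-iothub
# (v2019_11_04 models) and msrest are installed; literal values are
# illustrative placeholders.
from azure.mgmt.iothub.v2019_11_04.models import (
    IotHubSkuInfo,
    RouteProperties,
    RoutingProperties,
    IotHubProperties,
    IotHubDescription,
)

# Required fields are fetched with kwargs['...'], so they must be supplied.
sku = IotHubSkuInfo(name='S1', capacity=1)           # 'tier' is read-only and stays None

route = RouteProperties(
    name='telemetry-route',                          # must match ^[A-Za-z0-9-._]{1,64}$
    source='DeviceMessages',
    endpoint_names=['events'],                       # validation allows exactly one endpoint name
    is_enabled=True,
)

props = IotHubProperties(routing=RoutingProperties(routes=[route]))

hub = IotHubDescription(location='westus2', sku=sku, properties=props)

# Omitting a required kwarg raises KeyError, per the __init__ bodies above:
# IotHubDescription(location='westus2')  ->  KeyError: 'sku'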
36.706622
283
0.650042
10,063
87,582
5.50154
0.072046
0.017955
0.013312
0.020574
0.560836
0.516202
0.480203
0.432029
0.404736
0.377299
0
0.012971
0.221826
87,582
2,385
284
36.722013
0.799337
0.44258
0
0.557131
0
0.001668
0.251472
0.037216
0
0
0
0
0
1
0.05588
false
0
0.003336
0
0.209341
0.005004
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a435a4c25f9daef6a9569a6a1c22b40cc97a64d
18,579
py
Python
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py
cognifloyd/st2-open-rbac
fb3251223743e497267277fe9f5cef91f41ade34
[ "Apache-2.0" ]
null
null
null
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py
cognifloyd/st2-open-rbac
fb3251223743e497267277fe9f5cef91f41ade34
[ "Apache-2.0" ]
null
null
null
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py
cognifloyd/st2-open-rbac
fb3251223743e497267277fe9f5cef91f41ade34
[ "Apache-2.0" ]
null
null
null
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict import six import mock from st2common.services import triggers as trigger_service with mock.patch.object(trigger_service, 'create_trigger_type_db', mock.MagicMock()): from st2api.controllers.v1.webhooks import HooksHolder from st2common.persistence.rbac import UserRoleAssignment from st2common.models.db.rbac import UserRoleAssignmentDB from st2common.service_setup import register_service_in_service_registry from st2common.services import coordination from st2tests import config as tests_config from st2tests.fixturesloader import FixturesLoader from open_rbac.tests import APIControllerWithRBACTestCase from tests.unit.controllers.v1.test_webhooks import DUMMY_TRIGGER_DICT http_client = six.moves.http_client __all__ = [ 'APIControllersRBACTestCase' ] FIXTURES_PACK = 'generic' TEST_FIXTURES = OrderedDict([ ('runners', ['testrunner1.yaml', 'run-local.yaml']), ('sensors', ['sensor1.yaml']), ('actions', ['action1.yaml', 'local.yaml']), ('aliases', ['alias1.yaml']), ('triggers', ['trigger1.yaml', 'cron1.yaml']), ('rules', ['rule1.yaml']), ('triggertypes', ['triggertype1.yaml']), ('executions', ['execution1.yaml']), ('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']), ('enforcements', ['enforcement1.yaml']), ('apikeys', ['apikey1.yaml']), ('traces', ['trace_for_test_enforce.yaml']) ]) MOCK_RUNNER_1 = { 'name': 'test-runner-1', 'description': 'test', 'enabled': False } MOCK_ACTION_1 = { 'name': 'ma.dummy.action', 'pack': 'examples', 'description': 'test description', 'enabled': True, 'entry_point': '/tmp/test/action2.py', 'runner_type': 'local-shell-script', 'parameters': { 'c': {'type': 'string', 'default': 'C1', 'position': 0}, 'd': {'type': 'string', 'default': 'D1', 'immutable': True} } } MOCK_ACTION_ALIAS_1 = { 'name': 'alias3', 'pack': 'aliases', 'description': 'test description', 'action_ref': 'core.local', 'formats': ['a', 'b'] } MOCK_RULE_1 = { 'enabled': True, 'name': 'st2.test.rule2', 'pack': 'yoyohoneysingh', 'trigger': { 'type': 'wolfpack.triggertype-1' }, 'criteria': { 'trigger.k1': { 'pattern': 't1_p_v', 'type': 'equals' } }, 'action': { 'ref': 'sixpack.st2.test.action', 'parameters': { 'ip2': '{{rule.k1}}', 'ip1': '{{trigger.t1_p}}' } }, 'description': '' } class APIControllersRBACTestCase(APIControllerWithRBACTestCase): """ Test class which hits all the API endpoints which are behind the RBAC wall with a user which has no permissions and makes sure API returns access denied. 
""" register_packs = True fixtures_loader = FixturesLoader() coordinator = None @classmethod def setUpClass(cls): tests_config.parse_args(coordinator_noop=True) super(APIControllersRBACTestCase, cls).setUpClass() cls.coordinator = coordination.get_coordinator(use_cache=False) # Register mock service in the service registry for testing purposes service = six.binary_type(six.text_type('mock_service').encode('ascii')) register_service_in_service_registry(service=service, capabilities={'key1': 'value1', 'name': 'mock_service'}, start_heart=True) @classmethod def tearDownClass(cls): super(APIControllersRBACTestCase, cls).tearDownClass() coordination.coordinator_teardown(cls.coordinator) def setUp(self): super(APIControllersRBACTestCase, self).setUp() # Register packs if self.register_packs: self._register_packs() # Insert mock objects - those objects are used to test get one, edit and delete operations self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_FIXTURES) self.role_assignment_db_model = UserRoleAssignmentDB( user='user', role='role', source='assignments/user.yaml') UserRoleAssignment.add_or_update(self.role_assignment_db_model) @mock.patch.object(HooksHolder, 'get_triggers_for_hook', mock.MagicMock( return_value=[DUMMY_TRIGGER_DICT])) def test_api_endpoints_behind_rbac_wall(self): # alias_model = self.models['aliases']['alias1.yaml'] sensor_model = self.models['sensors']['sensor1.yaml'] rule_model = self.models['rules']['rule1.yaml'] enforcement_model = self.models['enforcements']['enforcement1.yaml'] execution_model = self.models['executions']['execution1.yaml'] trace_model = self.models['traces']['trace_for_test_enforce.yaml'] timer_model = self.models['triggers']['cron1.yaml'] supported_endpoints = [ # Runners { 'path': '/v1/runnertypes', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/runnertypes/test-runner-1', 'method': 'GET' }, { 'path': '/v1/runnertypes/test-runner-1', 'method': 'PUT', 'payload': MOCK_RUNNER_1 }, # Packs { 'path': '/v1/packs', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/packs/dummy_pack_1', 'method': 'GET' }, # Pack management { 'path': '/v1/packs/install', 'method': 'POST', 'payload': {'packs': 'libcloud'} }, { 'path': '/v1/packs/uninstall', 'method': 'POST', 'payload': {'packs': 'libcloud'} }, { 'path': '/v1/packs/register', 'method': 'POST', 'payload': {'types': ['actions']} }, { 'path': '/v1/packs/index/search', 'method': 'POST', 'payload': {'query': 'cloud'} }, { 'path': '/v1/packs/index/health', 'method': 'GET' }, # Pack views { 'path': '/v1/packs/views/files/dummy_pack_1', 'method': 'GET' }, # Pack config schemas { 'path': '/v1/config_schemas', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/config_schemas/dummy_pack_1', 'method': 'GET' }, { 'path': '/v1/packs/views/file/dummy_pack_1/pack.yaml', 'method': 'GET' }, # Pack configs { 'path': '/v1/configs', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/configs/dummy_pack_1', 'method': 'GET' }, { 'path': '/v1/configs/dummy_pack_1', 'method': 'PUT', 'payload': { 'foo': 'bar' } }, # Sensors { 'path': '/v1/sensortypes', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/sensortypes/%s' % (sensor_model.ref), 'method': 'GET' }, { 'path': '/v1/sensortypes/%s' % (sensor_model.ref), 'method': 'PUT', 'payload': {'enabled': False} }, # Actions { 'path': '/v1/actions', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/actions/wolfpack.action-1', 'method': 'GET' }, { 'path': '/v1/actions', 'method': 'POST', 'payload': MOCK_ACTION_1 }, { 
'path': '/v1/actions/wolfpack.action-1', 'method': 'PUT', 'payload': MOCK_ACTION_1 }, { 'path': '/v1/actions/wolfpack.action-1', 'method': 'DELETE' }, # Action aliases { 'path': '/v1/actionalias', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/actionalias/aliases.alias1', 'method': 'GET' }, { 'path': '/v1/actionalias', 'method': 'POST', 'payload': MOCK_ACTION_ALIAS_1 }, { 'path': '/v1/actionalias/aliases.alias1', 'method': 'PUT', 'payload': MOCK_ACTION_ALIAS_1 }, { 'path': '/v1/actionalias/aliases.alias1', 'method': 'DELETE' }, { 'path': '/v1/actionalias/match', 'method': 'POST', 'payload': {'command': 'test command string'} }, # Rules { 'path': '/v1/rules', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rules/%s' % (rule_model.ref), 'method': 'GET' }, { 'path': '/v1/rules', 'method': 'POST', 'payload': MOCK_RULE_1 }, { 'path': '/v1/rules/%s' % (rule_model.ref), 'method': 'PUT', 'payload': MOCK_RULE_1 }, { 'path': '/v1/rules/%s' % (rule_model.ref), 'method': 'DELETE' }, # Rule enforcements { 'path': '/v1/ruleenforcements', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/ruleenforcements/%s' % (enforcement_model.id), 'method': 'GET' }, # Action Executions { 'path': '/v1/executions', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/executions/%s' % (execution_model.id), 'method': 'GET' }, { 'path': '/v1/executions/%s/output' % (execution_model.id), 'method': 'GET' }, { 'path': '/v1/executions', 'method': 'POST', 'payload': {'action': 'core.local'} # schedule execution / run action }, { 'path': '/v1/executions/%s' % (execution_model.id), 'method': 'DELETE' # stop execution }, { 'path': '/v1/executions/%s/re_run' % (execution_model.id), 'method': 'POST', # re-run execution 'payload': {'parameters': {}} }, # Action execution nested controllers { 'path': '/v1/executions/%s/attribute/trigger_instance' % (execution_model.id), 'method': 'GET' }, { 'path': '/v1/executions/%s/children' % (execution_model.id), 'method': 'GET' }, # Alias executions { 'path': '/v1/aliasexecution', 'method': 'POST', 'payload': {'name': 'alias1', 'format': 'foo bar ponies', 'command': 'foo bar ponies', 'user': 'channel', 'source_channel': 'bar'} }, # Webhook { 'path': '/v1/webhooks/st2', 'method': 'POST', 'payload': { 'trigger': 'some', 'payload': { 'some': 'thing' } } }, # Traces { 'path': '/v1/traces', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/traces/%s' % (trace_model.id), 'method': 'GET' }, # Timers { 'path': '/v1/timers', 'method': 'GET' }, { 'path': '/v1/timers/%s' % (timer_model.id), 'method': 'GET' }, # Webhooks { 'path': '/v1/webhooks', 'method': 'GET' }, { 'path': '/v1/webhooks/git', 'method': 'GET' }, # RBAC - roles { 'path': '/v1/rbac/roles', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rbac/roles/admin', 'method': 'GET' }, # RBAC - user role assignments { 'path': '/v1/rbac/role_assignments', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rbac/role_assignments/%s' % (self.role_assignment_db_model['id']), 'method': 'GET' }, # RBAC - permission types { 'path': '/v1/rbac/permission_types', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/rbac/permission_types/action', 'method': 'GET' }, # Action views { 'path': '/v1/actions/views/overview', 'method': 'GET', 'is_getall': True }, # Rule views { 'path': '/v1/rules/views', 'method': 'GET', 'is_getall': True }, # Service registry { 'path': '/v1/service_registry/groups', 'method': 'GET', 'is_getall': True }, { 'path': '/v1/service_registry/groups/mock_service/members', 'method': 'GET', 'is_getall': True } ] 
self.use_user(self.users['no_permissions']) for endpoint in supported_endpoints: response = self._perform_request_for_endpoint(endpoint=endpoint) msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'], endpoint['path'], response.body) self.assertEqual(response.status_code, http_client.FORBIDDEN, msg) # Also test ?limit=-1 - non-admin user self.use_user(self.users['observer']) for endpoint in supported_endpoints: if not endpoint.get('is_getall', False): continue response = self.app.get(endpoint['path'] + '?limit=-1', expect_errors=True) msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'], endpoint['path'], response.body) self.assertEqual(response.status_code, http_client.FORBIDDEN, msg) # Also test ?limit=-1 - admin user self.use_user(self.users['admin']) for endpoint in supported_endpoints: if not endpoint.get('is_getall', False): continue response = self.app.get(endpoint['path'] + '?limit=-1') self.assertEqual(response.status_code, http_client.OK) def test_icon_png_file_is_whitelisted(self): self.use_user(self.users['no_permissions']) # Test that access to icon.png file doesn't require any permissions response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png') self.assertEqual(response.status_code, http_client.OK) # Other files should return forbidden response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml', expect_errors=True) self.assertEqual(response.status_code, http_client.FORBIDDEN) def _perform_request_for_endpoint(self, endpoint): if endpoint['method'] == 'GET': response = self.app.get(endpoint['path'], expect_errors=True) elif endpoint['method'] == 'POST': return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True) elif endpoint['method'] == 'PUT': return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True) elif endpoint['method'] == 'DELETE': return self.app.delete(endpoint['path'], expect_errors=True) else: raise ValueError('Unsupported method: %s' % (endpoint['method'])) return response
33.78
98
0.466171
1,604
18,579
5.262469
0.222569
0.045492
0.026063
0.036252
0.348063
0.291316
0.263713
0.192631
0.131027
0.121076
0
0.013864
0.398245
18,579
549
99
33.84153
0.741145
0.091663
0
0.315904
0
0
0.248453
0.066932
0
0
0
0
0.010893
1
0.013072
false
0
0.028322
0
0.058824
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
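A note on the RBAC API-controller test record above: it walks a table of endpoint descriptors ({'path', 'method', 'payload', 'is_getall'}) and dispatches every request through one helper. The sketch below illustrates only that dict-driven dispatch pattern; FakeApp and perform_request are hypothetical stand-ins, not st2 or WebTest APIs.

# Minimal sketch of the dict-driven endpoint dispatch used in the test above.
# "FakeApp" is a made-up stand-in for the WebTest TestApp; only the
# descriptor-walking logic mirrors the record.
import http.client as http_client


class FakeApp:
    """Pretend every request is rejected, as the 'no_permissions' user would see."""

    def request(self, method, path, payload=None):
        return {"status_code": http_client.FORBIDDEN, "body": b"forbidden"}


def perform_request(app, endpoint):
    # One helper handles every verb, driven purely by the descriptor dict.
    method = endpoint["method"]
    if method == "GET":
        return app.request("GET", endpoint["path"])
    if method in ("POST", "PUT"):
        return app.request(method, endpoint["path"], endpoint.get("payload"))
    if method == "DELETE":
        return app.request("DELETE", endpoint["path"])
    raise ValueError("Unsupported method: %s" % method)


endpoints = [
    {"path": "/v1/actions", "method": "GET", "is_getall": True},
    {"path": "/v1/actions", "method": "POST", "payload": {"name": "demo"}},
]

app = FakeApp()
for ep in endpoints:
    resp = perform_request(app, ep)
    assert resp["status_code"] == http_client.FORBIDDEN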
0a4491bed67c4627a06dabc6e88940ee8f57226d
14,777
py
Python
ResNet/dropblock.py
whj363636/CamDrop
f8af8c200665145f112b59348f60fc4cf80f04ec
[ "MIT" ]
null
null
null
ResNet/dropblock.py
whj363636/CamDrop
f8af8c200665145f112b59348f60fc4cf80f04ec
[ "MIT" ]
null
null
null
ResNet/dropblock.py
whj363636/CamDrop
f8af8c200665145f112b59348f60fc4cf80f04ec
[ "MIT" ]
1
2021-11-06T11:22:49.000Z
2021-11-06T11:22:49.000Z
# -*- coding: utf-8 -*- # File: dropblock.py from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import six # from tensorpack.tfutils.compat import tfv1 as tf # this should be avoided first in model code from tensorpack.tfutils.tower import get_current_tower_context from tensorpack.models import GlobalAvgPooling, FullyConnected import tensorflow as tf __all__ = ['dropblock', 'dropblock2','dropblock3','dropblock4'] # 1: paper baseline; 2: group dropout; 3: group soft-dropout; 4: Uout group dropout def dropblock(net, keep_prob, dropblock_size, gap_w=None, label=None, G=None, CG=None, data_format='channels_first'): """DropBlock: a regularization method for convolutional neural networks. DropBlock is a form of structured dropout, where units in a contiguous region of a feature map are dropped together. DropBlock works better than dropout on convolutional layers due to the fact that activation units in convolutional layers are spatially correlated. See https://arxiv.org/pdf/1810.12890.pdf for details. Args: net: `Tensor` input tensor. is_training: `bool` for whether the model is training. keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. "None" means no DropBlock. dropblock_size: `int` size of blocks to be dropped by DropBlock. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A version of input tensor with DropBlock applied. Raises: if width and height of the input tensor are not equal. """ ctx = get_current_tower_context() is_training = bool(ctx.is_training) if not is_training or keep_prob is None: return net tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape)) if data_format == 'channels_last': _, width, height, _ = net.get_shape().as_list() else: _, _, width, height = net.get_shape().as_list() if width != height: raise ValueError('Input tensor with width!=height is not supported.') dropblock_size = min(dropblock_size, width) # seed_drop_rate is the gamma parameter of DropBlcok. seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / ( width - dropblock_size + 1)**2 # Forces the block to be inside the feature map. 
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width)) valid_block_center = tf.logical_and( tf.logical_and(w_i >= int(dropblock_size // 2), w_i < width - (dropblock_size - 1) // 2), tf.logical_and(h_i >= int(dropblock_size // 2), h_i < width - (dropblock_size - 1) // 2)) valid_block_center = tf.expand_dims(valid_block_center, 0) valid_block_center = tf.expand_dims( valid_block_center, -1 if data_format == 'channels_last' else 0) randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32) block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast( (1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1 block_pattern = tf.cast(block_pattern, dtype=tf.float32) if dropblock_size == width: block_pattern = tf.reduce_min( block_pattern, axis=[1, 2] if data_format == 'channels_last' else [2, 3], keepdims=True) else: if data_format == 'channels_last': ksize = [1, dropblock_size, dropblock_size, 1] else: ksize = [1, 1, dropblock_size, dropblock_size] block_pattern = -tf.nn.max_pool( -block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC' if data_format == 'channels_last' else 'NCHW') percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast( tf.size(block_pattern), tf.float32) net = net / tf.cast(percent_ones, net.dtype) * tf.cast( block_pattern, net.dtype) return net def dropblock2(net, keep_prob, dropblock_size, G=None, CG=None, data_format='channels_first'): """ mimic GN """ ctx = get_current_tower_context() is_training = bool(ctx.is_training) if not is_training or keep_prob is None: return net tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape)) if data_format == 'channels_last': N, height, width, C = net.get_shape().as_list() else: N, C, height, width = net.get_shape().as_list() N = tf.shape(net)[0] if width != height: raise ValueError('Input tensor with width!=height is not supported.') if G == None: G = C // CG if CG == None: CG = C // G net = tf.reshape(net, [N, G, CG, height, width]) dropblock_size = min(dropblock_size, width) # seed_drop_rate is the gamma parameter of DropBlcok. # seed_drop_rate = (1.0 - keep_prob) * width**2 * G**2 / (C * dropblock_size**2) / (C * (width - dropblock_size + 1)**2) seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2 # Forces the block to be inside the feature map. 
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width)) valid_block_center = tf.logical_and( tf.logical_and(w_i >= int(dropblock_size // 2), w_i < width - (dropblock_size - 1) // 2), tf.logical_and(h_i >= int(dropblock_size // 2), h_i < width - (dropblock_size - 1) // 2)) valid_block_center = tf.expand_dims(valid_block_center, 0) # for depth valid_block_center = tf.expand_dims(valid_block_center, 0) # for batch valid_block_center = tf.expand_dims(valid_block_center, 0) # for channel randnoise = tf.random_uniform([N, G, 1, width, height], dtype=tf.float32) block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast( (1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1 block_pattern = tf.cast(block_pattern, dtype=tf.float32) if dropblock_size == width: block_pattern = tf.reduce_min(block_pattern, axis=[2, 3, 4], keepdims=True) else: ksize = [1, 1, dropblock_size, dropblock_size] block_pattern = tf.reduce_max(-block_pattern, reduction_indices=[2]) block_pattern = -tf.nn.max_pool(block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW') block_pattern = tf.expand_dims(block_pattern, 2) percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32) net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype) net = tf.reshape(net, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height, width]) return net def CamDrop(net, keep_prob, dropblock_size, flag=None, label=None, G=None, CG=None, data_format='channels_first'): '''CamDrop''' def _get_cam(net, label, flag, dropblock_size, data_format='channels_first'): ''' net: [N, C, H, W] gap_w : [gap_C, num_of_class] ''' if data_format == 'channels_last': N, height, width, C = net.get_shape().as_list() else: N, C, height, width = net.get_shape().as_list() N = tf.shape(net)[0] gap_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'linear/W') if flag > 0 else None if not gap_w is None: gap_w = tf.convert_to_tensor(gap_w, tf.float32) gap_C, num = tf.squeeze(gap_w, 0).get_shape().as_list() # [gap_C, num] gap_w = tf.reshape(gap_w, [C, gap_C//C, num]) gap_w = tf.reduce_mean(gap_w, reduction_indices=[1]) # [C, num] label = tf.gather(tf.transpose(gap_w), label) # [N, C] # spatial weights = tf.expand_dims(label, 2) # [N, C, 1] net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width]) cam = tf.matmul(weights, net, transpose_a=True) # [N, 1, width*height] # spt_mask = tf.not_equal(cam, tf.reduce_max(cam, reduction_indices=[2], keepdims=True)) # cam = tf.reshape(cam, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height, width]) # cam = tf.nn.avg_pool(cam, ksize=[1, 1, dropblock_size, dropblock_size], strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW') # left_or_top = (dropblock_size-1) // 2 # right_or_bot = left_or_top if dropblock_size % 2 == 1 else dropblock_size-left_or_top-1 # cam = tf.pad(cam, [[0, 0], [0, 0], [left_or_top, right_or_bot], [left_or_top, right_or_bot]]) # cam = tf.reshape(cam, [N, height*width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height*width]) k = tf.cast(height*width/dropblock_size**2, tf.int32) topk, _ = tf.math.top_k(cam, k=k) # [N, 1, k] topk = tf.gather(topk, indices=[k-1], axis=-1) # [N, 1, 1] spt_mask = (cam < topk) spt_mask = tf.reshape(spt_mask, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(spt_mask, [N, 1, 
height, width]) # channel k = tf.cast(C/8, tf.int32) topk, _ = tf.math.top_k(label, k=k+1) # [N, k] topk = tf.gather(topk, indices=k, axis=1) # [N, 1] topk = tf.expand_dims(topk, 1) # [N, C, 1] chan_mask = (label < topk) chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1] chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1, 1] cam_mask = tf.logical_or(spt_mask, chan_mask) # chan_mask = tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) # chan_mask = tf.reshape(cam, [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(cam, [N*C, height*width]) # chan_mask = tf.reshape(tf.nn.sigmoid(cam), [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(tf.nn.sigmoid(cam), [N, 1, height, width]) else: cam_mask = False return cam_mask # def _get_gradcam(net, cost=None, gap_w=None, data_format='channels_first'): # # Conv layer tensor [?,2048,10,10] # def _compute_gradients(tensor, var_list): # grads = tf.gradients(tensor, var_list) # return [grad if grad is not None else tf.zeros_like(var) # for var, grad in zip(var_list, grads)] # # grads = tf.gradients(cost, net)[0] # if not gap_w is None: # # Normalizing the gradients # if data_format == 'channels_last': # N, height, width, C = net.get_shape().as_list() # else: # N, C, height, width = net.get_shape().as_list() # N = tf.shape(net)[0] # grads = _compute_gradients(cost, [net])[0] # norm_grads = tf.divide(grads, tf.sqrt(tf.reduce_mean(tf.square(grads), reduction_indices=[2,3], keepdims=True)) + tf.constant(1e-5)) # weights = tf.reduce_mean(norm_grads, reduction_indices=[2,3]) # [N, C] # weights = tf.expand_dims(weights, 2) # [N, C, 1] # net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width]) # # cam_mean = 1 + tf.matmul(net, weights, transpose_a=True) # [N, width*height, 1] # cam_mean = tf.maximum(tf.matmul(weights, net, transpose_a=True), 0) # [N, 1, width*height] # cam_chan = tf.maximum(tf.multiply(net, weights), 0) # [N, C, width*height] # cam = cam_mean*cam_chan # # Passing through ReLU # cam = cam / tf.reduce_max(cam, reduction_indices=[1,2], keepdims=True) # cam = tf.reshape(cam, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(cam, [N, C, height, width]) # else: # cam = 0. 
# return cam # def _gumbel_softmax(logits, tau, shape, seed_drop_rate, eps=1e-20): # if logits == False: # return logits # U = tf.random_uniform(tf.shape(logits), minval=0, maxval=1) # y = logits - tf.log(-tf.log(U + eps) + eps) # cam_mask = tf.nn.softmax(y / tau) # topk, _ = tf.math.top_k(cam_mask, k=tf.cast(seed_drop_rate*shape[-1], tf.int32)) # [N, 1] # topk = tf.gather(topk, indices=tf.cast(seed_drop_rate*shape[-1], tf.int32)-1, axis=1) # topk = tf.expand_dims(topk, 1) # [N, C, 1] # cam_mask = (cam_mask < topk) # # cam_mask = tf.cast(tf.equal(cam_mask, tf.reduce_max(cam_mask, reduction_indices=[1], keepdims=True)), tf.float32) # cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1] # cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1, 1] # return cam_mask ctx = get_current_tower_context() is_training = bool(ctx.is_training) if not is_training or keep_prob is None: return net tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape)) if data_format == 'channels_last': _, width, height, C = net.get_shape().as_list() else: _, C, width, height = net.get_shape().as_list() if width != height: raise ValueError('Input tensor with width!=height is not supported.') N = tf.shape(net)[0] dropblock_size = min(dropblock_size, width) # seed_drop_rate is the gamma parameter of DropBlcok. seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2 cam_mask = _get_cam(net, label, flag, dropblock_size, data_format) # Forces the block to be inside the feature map. w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width)) valid_block_center = tf.logical_and( tf.logical_and(w_i >= int(dropblock_size // 2), w_i < width - (dropblock_size - 1) // 2), tf.logical_and(h_i >= int(dropblock_size // 2), h_i < width - (dropblock_size - 1) // 2)) valid_block_center = tf.expand_dims(valid_block_center, 0) valid_block_center = tf.expand_dims(valid_block_center, -1 if data_format == 'channels_last' else 0) randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32) block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast((1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1 block_pattern = tf.logical_or(block_pattern, cam_mask) block_pattern = tf.cast(block_pattern, dtype=tf.float32) if dropblock_size == width: block_pattern = tf.reduce_min( block_pattern, axis=[1, 2] if data_format == 'channels_last' else [2, 3], keepdims=True) else: if data_format == 'channels_last': ksize = [1, dropblock_size, dropblock_size, 1] else: ksize = [1, 1, dropblock_size, dropblock_size] block_pattern = -tf.nn.max_pool( -block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC' if data_format == 'channels_last' else 'NCHW') percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32) net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype) return net
45.891304
166
0.663667
2,295
14,777
4.061002
0.116776
0.079506
0.054077
0.049356
0.688734
0.655579
0.640665
0.60676
0.603112
0.584013
0
0.021737
0.193679
14,777
322
167
45.891304
0.76047
0.35474
0
0.647399
0
0
0.067293
0
0
0
0
0
0
1
0.023121
false
0
0.046243
0
0.109827
0.00578
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
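A note on the ResNet/dropblock.py record above: it computes the DropBlock gamma as (1 - keep_prob) * width^2 / block_size^2 / (width - block_size + 1)^2, samples block centres at that rate, and grows each centre into a square via a max-pool. The NumPy sketch below restates those two steps without TensorFlow/tensorpack; it is an interpretation of the record's intent, and all names here are illustrative.

# A minimal NumPy sketch, assuming the TF code's intent is: sample block
# centres with probability gamma, then zero a block_size x block_size square
# around each centre and renormalise by the kept fraction.
import numpy as np


def dropblock_mask(width, keep_prob, block_size, rng=np.random.default_rng(0)):
    # gamma, i.e. the record's seed_drop_rate
    gamma = (1.0 - keep_prob) * width**2 / block_size**2 / (width - block_size + 1) ** 2

    # valid centres keep the whole block inside the feature map
    half = block_size // 2
    centres = np.zeros((width, width), dtype=bool)
    valid = (slice(half, width - (block_size - 1) // 2),) * 2
    side = width - block_size + 1
    centres[valid] = rng.random((side, side)) < gamma

    # expand each centre to a full block (the max-pool step in the record)
    drop = np.zeros_like(centres)
    for y, x in zip(*np.nonzero(centres)):
        drop[y - half:y - half + block_size, x - half:x - half + block_size] = True

    mask = (~drop).astype(np.float32)
    # renormalise like the record (there the feature map is scaled by the ones-fraction)
    return mask / mask.mean()


mask = dropblock_mask(width=8, keep_prob=0.9, block_size=3)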
0a44f5460d17d97fc0728fbb786ff7e11153576a
2,374
py
Python
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py
Purple-PI/rlstructures
9b201b083715bbda2f3534b010c84e11dfc0a1c7
[ "MIT" ]
281
2021-01-13T14:20:23.000Z
2022-03-23T08:46:56.000Z
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py
Purple-PI/rlstructures
9b201b083715bbda2f3534b010c84e11dfc0a1c7
[ "MIT" ]
2
2021-01-22T23:28:34.000Z
2021-04-29T22:05:42.000Z
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py
Purple-PI/rlstructures
9b201b083715bbda2f3534b010c84e11dfc0a1c7
[ "MIT" ]
13
2021-01-15T14:53:32.000Z
2022-03-22T11:12:54.000Z
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures import logging
from rlstructures.env_wrappers import GymEnv, GymEnvInf
from rlstructures.tools import weight_init
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_recurrent_policy.agent import RecurrentAgent
from tutorial.tutorial_recurrent_policy.a2c import A2C
import gym
from gym.wrappers import TimeLimit

# We write the 'create_env' and 'create_agent' function in the main file
# to allow these functions to be used with pickle when creating the batcher processes


def create_gym_env(env_name):
    return gym.make(env_name)


def create_env(n_envs, env_name=None, max_episode_steps=None, seed=None):
    envs = []
    for k in range(n_envs):
        e = create_gym_env(env_name)
        e = TimeLimit(e, max_episode_steps=max_episode_steps)
        envs.append(e)
    return GymEnv(envs, seed)


def create_train_env(n_envs, env_name=None, max_episode_steps=None, seed=None):
    envs = []
    for k in range(n_envs):
        e = create_gym_env(env_name)
        e = TimeLimit(e, max_episode_steps=max_episode_steps)
        envs.append(e)
    return GymEnvInf(envs, seed)


def create_agent(model, n_actions=1):
    return RecurrentAgent(model=model, n_actions=n_actions)


class Experiment(A2C):
    def __init__(self, config, create_env, create_train_env, create_agent):
        super().__init__(config, create_env, create_train_env, create_agent)


if __name__ == "__main__":
    # We use spawn mode such that most of the environment will run in multiple processes
    import torch.multiprocessing as mp

    mp.set_start_method("spawn")

    config = {
        "env_name": "CartPole-v0",
        "a2c_timesteps": 3,
        "n_envs": 4,
        "max_episode_steps": 100,
        "env_seed": 42,
        "n_threads": 4,
        "n_evaluation_threads": 2,
        "n_evaluation_episodes": 256,
        "time_limit": 3600,
        "lr": 0.001,
        "discount_factor": 0.95,
        "critic_coef": 1.0,
        "entropy_coef": 0.01,
        "a2c_coef": 1.0,
        "logdir": "./results",
    }
    exp = Experiment(config, create_env, create_train_env, create_agent)
    exp.run()
29.675
156
0.700505
347
2,374
4.536023
0.391931
0.031131
0.066709
0.02859
0.315756
0.259212
0.259212
0.259212
0.182973
0.182973
0
0.019691
0.208509
2,374
79
157
30.050633
0.817988
0.171019
0
0.178571
0
0
0.101582
0.01072
0
0
0
0
0
1
0.089286
false
0
0.25
0.035714
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
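A note on the tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py record above: each environment is built by wrapping gym.make() output in TimeLimit and collecting the instances for the batcher. The sketch below shows just that factory pattern with plain gym; the make_envs name is illustrative and rlstructures' GymEnv wrapper is replaced by a plain list.

# Sketch of the env-factory pattern from the record, using plain gym only.
import gym
from gym.wrappers import TimeLimit


def make_envs(n_envs, env_name="CartPole-v0", max_episode_steps=100):
    # Build n_envs independent environments, each capped at max_episode_steps.
    return [TimeLimit(gym.make(env_name), max_episode_steps=max_episode_steps)
            for _ in range(n_envs)]


envs = make_envs(4)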
0a458f21d72d88857440d86e340b226a075998cd
19,980
py
Python
dashboard/gnd-app.py
buchmuseum/GND_Dashboard
c8c039bc8c09c480fc5ab8a0b186cd9dc37d7423
[ "CC0-1.0" ]
5
2021-01-21T17:54:23.000Z
2021-08-09T07:34:10.000Z
dashboard/gnd-app.py
buchmuseum/GND_Dashboard
c8c039bc8c09c480fc5ab8a0b186cd9dc37d7423
[ "CC0-1.0" ]
2
2021-07-27T13:38:06.000Z
2021-08-05T16:01:19.000Z
dashboard/gnd-app.py
buchmuseum/GND_Dashboard
c8c039bc8c09c480fc5ab8a0b186cd9dc37d7423
[ "CC0-1.0" ]
2
2021-03-02T12:48:14.000Z
2021-07-17T08:48:48.000Z
from matplotlib.pyplot import title import streamlit as st import pandas as pd import altair as alt import pydeck as pdk import os import glob from wordcloud import WordCloud import streamlit_analytics path = os.path.dirname(__file__) streamlit_analytics.start_tracking() @st.cache def load_gnd_top_daten(typ): gnd_top_df = pd.DataFrame() for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'): gnd_top_df = gnd_top_df.append(pd.read_csv(file, index_col=None)) return gnd_top_df def sachbegriff_cloud(): #wordcloud der top 100 sachbegriffe eines auszuwählenden tages der letzten 10 werktage st.header('TOP 100 Sachbegriffe pro Tag') st.write('Wählen Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.') files = glob.glob(f'{path}/../stats/*Ts-count.csv') daten = [x[-23:-13] for x in files] daten.sort() daten_filter = st.select_slider('Wählen Sie ein Datum', options=daten, value=daten[-1]) df = pd.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv') dict = df.to_dict(orient='records') worte = {} for record in dict: worte.update({record['sachbegriff']:record['count']}) wc = WordCloud(background_color="white", max_words=100, width=2000, height=800, colormap='tab20') wc.generate_from_frequencies(worte) return st.image(wc.to_array()) def wirkungsorte(): #ranking und karte der meistverwendeten wirkungsorte aller personen in der gnd df = pd.read_csv(f'{path}/wirkungsorte-top50.csv') df.drop(columns=['id'], inplace=True) df.rename(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True) st.header('TOP Wirkungsorte von GND-Personen') st.markdown('Von allen Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.') #Balkendiagramm orte_filt = st.slider('Zeige Top …', min_value=3, max_value=len(df), value=10, step=1) graph_count = alt.Chart(df.nlargest(orte_filt, 'Anzahl', keep='all')).mark_bar().encode( alt.X('Name:N', sort='y'), alt.Y('Anzahl'), alt.Color('Name:N', legend=alt.Legend(columns=2)), tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')] ) st.altair_chart(graph_count, use_container_width=True) #Karte INITIAL_VIEW_STATE = pdk.ViewState( latitude=50.67877877706058, longitude=8.129981238464392, zoom=4.5, max_zoom=16, bearing=0 ) scatterplotlayer = pdk.Layer( "ScatterplotLayer", df, pickable=True, opacity=0.5, stroked=True, filled=True, radius_min_pixels=1, radius_max_pixels=100, line_width_min_pixels=1, get_position='[lon, lat]', get_radius="Anzahl", get_fill_color=[255, 140, 0], get_line_color=[0, 0, 0] ) st.pydeck_chart(pdk.Deck( scatterplotlayer, initial_view_state=INITIAL_VIEW_STATE, map_style=pdk.map_styles.LIGHT, tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"})) def wirkungsorte_musik(): #nach jahrzehnten zwischen 1400 und 2010 gefilterte auswertung der GND-Musikwerke, Musik-Personen und Wikrungsorte und daraus abgeleitete Zentren der Musikkultur, dargestellt auf einer Karte musiker_orte = pd.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn') st.header('Wirkungszentren der Musik 1400–2010') st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.') limiter = st.slider('Jahresfilter', min_value=1400, max_value=int(musiker_orte['jahrzehnt'].max()), 
value=(1900), step=10) musik_filt= musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)] musik_filt['norm']=(musik_filt['count']-musik_filt['count'].min())/(musik_filt['count'].max()-musik_filt['count'].min()) #Karte INITIAL_VIEW_STATE = pdk.ViewState( latitude=50.67877877706058, longitude=8.129981238464392, zoom=4.5, max_zoom=16, bearing=0 ) musiker_scatter = pdk.Layer( "ScatterplotLayer", musik_filt, opacity=0.8, get_position='[lon, lat]', pickable=True, stroked=True, filled=True, radius_min_pixels=1, radius_max_pixels=100, radiusscale=100, line_width_min_pixels=1, get_radius="norm*50000", get_fill_color=[50, 168, 92], get_line_color=[39, 71, 51] ) st.pydeck_chart(pdk.Deck( musiker_scatter, initial_view_state=INITIAL_VIEW_STATE, map_style=pdk.map_styles.LIGHT, tooltip={"html": "<b>{name}</b>"})) st.subheader(f'TOP 10 Wirkungszentren der {limiter}er') col1, col2 = st.beta_columns(2) i = 1 for index, row in musik_filt.nlargest(10, 'norm').iterrows(): if i <= 5: with col1: st.write(f'{i}. {row["name"]}') elif i > 5: with col2: st.write(f'{i}. {row["name"]}') i += 1 def gesamt_entity_count(): #Gesamtzahl der GND-Entitäten with open(f"{path}/../stats/gnd_entity_count.csv", "r") as f: entities = f'{int(f.read()):,}' return st.write(f"GND-Entitäten gesamt: {entities.replace(',','.')}") def relationen(): #Top 10 der GND-Relationierungscodes rels = pd.read_csv(f'{path}/../stats/gnd_codes_all.csv', index_col=False) st.subheader('Relationen') st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pdf).') rels_filt = st.slider('Zeige Top ...', 5, len(rels), 10, 1) relation_count = alt.Chart(rels.nlargest(rels_filt, 'count', keep='all')).mark_bar().encode( alt.X('code', title='Relationierungs-Code', sort='-y'), alt.Y('count', title='Anzahl'), alt.Color('code', sort='-y', title='Relationierungscode'), tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')] ) st.altair_chart(relation_count, use_container_width=True) with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f: relations = f'{int(f.read()):,}' st.write(f"Relationen zwischen Entitäten gesamt: {relations.replace(',','.')}") def systematik(): #Ranking der meistverwendeten GND-Systematik-Notationen classification = pd.read_csv(f'{path}/../stats/gnd_classification_all.csv', index_col=False) st.subheader('Systematik') st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. 
Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).') class_filt = st.slider('Zeige Top …', 5, len(classification), 10, 1) classification_count = alt.Chart(classification.nlargest(class_filt, 'count', keep='all')).mark_bar().encode( alt.X('id', title='Notation', sort='-y'), alt.Y('count', title='Anzahl'), alt.Color('name', sort='-y', title="Bezeichnung"), tooltip=[alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')] ) return st.altair_chart(classification_count, use_container_width=True) def systematik_ts(): #Ranking der Systematik von Ts-Sätzen classification_ts = pd.read_csv(f'{path}/../stats/gnd_classification_Ts_all.csv', index_col=False) st.subheader('Systematik der Sachbegriffe') st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgetragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).') class_ts_filt = st.slider('Zeige TOP …', min_value=5, max_value=len(classification_ts), value=10, step=1) classification_ts_count = alt.Chart(classification_ts.nlargest(class_ts_filt, 'count', keep='all')).mark_bar().encode( alt.X('id:N', title='Notation', sort='-y'), alt.Y('count:Q', title='Anzahl'), alt.Color('name:N', sort='-y', title='Bezeichnung'), tooltip = [alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')] ) return st.altair_chart(classification_ts_count, use_container_width=True) def zeitverlauf(): #zeitverlauf der erstellung der GND-Sätze ab Januar 1972 created_at = pd.read_csv(f'{path}/../stats/gnd_created_at.csv', index_col='created_at', parse_dates=True, header=0, names=['created_at', 'count']) st.subheader('Zeitverlauf der GND-Datensatzerstellung') st.write('Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgetragen. Die ersten Sätze stammen aus dem Januar 1972') created_filt = st.slider('Zeitraum', 1972, 2021, (1972,2021), 1) created = alt.Chart(created_at[f'{created_filt[0]}':f'{created_filt[1]}'].reset_index()).mark_line().encode( alt.X('created_at:T', title='Erstelldatum'), alt.Y('count:Q', title='Sätze pro Monat'), tooltip=['count'] ) return st.altair_chart(created, use_container_width=True) def entities(): #GND-Entitäten nach Satzart und Katalogisierungslevel df = pd.read_csv(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names=['entity','count']) df['level'] = df.entity.str[2:3] df.entity = df.entity.str[:2] if satzart == 'alle': entity_count = alt.Chart(df).mark_bar().encode( alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'), alt.Y('entity', title='Satzart'), alt.Color('level', title='Katalogisierungslevel'), tooltip=[alt.Tooltip('entity', title='Satzart'), alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')] ) st.subheader('Entitäten und Katalogisierungslevel') else: entity_count = alt.Chart(df.loc[df['entity'].str.startswith(satzart[:2])]).mark_bar().encode( alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'), alt.Y('entity', title='Satzart'), alt.Color('level', title='Katalogisierungslevel'), tooltip=[alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')] ) st.subheader(f'Katalogisierungslevel in Satzart {satzart}') st.write('Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. 
Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.') return st.altair_chart(entity_count, use_container_width=True) def newcomer(): #TOP 10 der Entitäten, die in den letzten 365 Tagen erstellt wurden if satzart == 'alle': st.subheader(f'TOP 10 GND-Newcomer') st.write('TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.') newcomer_daten = pd.read_csv(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None) newcomer = alt.Chart(newcomer_daten).mark_bar().encode( alt.X('gnd_id', title='Entitäten', sort='-y'), alt.Y('count', title='Anzahl'), alt.Color('name', sort='-y', title='Entität'), tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')] ) else: st.subheader(f'TOP 10 {satzart} GND-Newcomer') st.write(f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.') newcomer_daten = load_gnd_top_daten('newcomer_top10') newcomer = alt.Chart(newcomer_daten.loc[newcomer_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode( alt.X('gnd_id:O', title='Entitäten', sort='-y'), alt.Y('count', title='Anzahl'), alt.Color('name', sort='-y', title='Entität'), tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')] ) st.altair_chart(newcomer, use_container_width=True) def gnd_top(): #TOP 10 GND-Entitäten in DNB-Titeldaten, nach Satzart gefiltert if satzart == 'alle': st.subheader(f'TOP 10 GND-Entitäten in DNB-Titeldaten') top_daten = pd.read_csv(f'{path}/../stats/title_gnd_top10.csv', index_col=None) gnd_top = alt.Chart(top_daten).mark_bar().encode( alt.X('gnd_id:N', title='Entitäten', sort='-y'), alt.Y('count:Q', title='Anzahl'), alt.Color('name:N', sort='-y', title='Entität'), tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('count:Q', title='Anzahl')] ) else: st.subheader(f'TOP 10 {satzart} in DNB-Titeldaten') top_daten = load_gnd_top_daten('top10') gnd_top = alt.Chart(top_daten.loc[top_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode( alt.X('gnd_id:N', title='Entitäten', sort='-y'), alt.Y('count:Q', title='Anzahl'), alt.Color('name:N', sort='-y', title='Entität'), tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')] ) st.write('Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. 
Eine detaillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.') st.altair_chart(gnd_top, use_container_width=True) def dnb_links(): #GND-Verknüpfungen in DNB Titeldaten if satzart == 'alle': #Anzahl GND-Verknüpfungen in DNB-Titeldaten with open(f"{path}/../stats/title_gnd_links.csv", "r") as f: links = f'{int(f.read()):,}' #GND-Entitäten maschinell verknüpft with open(f"{path}/../stats/title_gnd_links_auto.csv", "r") as f: auto_entites = int(f.read()) #GND-Entitäten aus Fremddaten with open(f"{path}/../stats/title_gnd_links_ext.csv", "r") as f: fremd_entities = int(f.read()) #Anzahl der intellktuell verknüpften GND-Entitäten in DNB-Titeldaten with open(f"{path}/../stats/title_gnd_links_unique.csv", "r") as f: uniques = int(f.read()) uniques_str = f'{uniques:,}' #Durchschnittliche Anzahl an GND-Verknüpfungen pro DNB-Titeldatensatz with open(f"{path}/../stats/title_gnd_mean.csv", "r") as f: mean = str(round(float(f.read()),2)).replace('.',',') st.write(f"{links.replace(',','.')} intellektuell vergebene Verknüpfungen zu {uniques_str.replace(',','.')} GND-Entitäten in den DNB-Titeldaten. Durchschnittlich {mean} GND-Verknüpfungen pro DNB-Titeldatensatz") entity_df = pd.DataFrame.from_dict({"intellektuell verknüpfte Entitäten": uniques, "Entitäten aus automatischen Prozessen": auto_entites, "Entitäten aus Fremddaten": fremd_entities}, orient = "index").reset_index() entity_df = entity_df.rename(columns={"index":"Datenart", 0:"Anzahl"}) st.subheader('Datenherkunft der GND-Entitäten in DNB-Titeldaten') st.write('Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellen Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellen Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten.') entities = alt.Chart(entity_df).mark_bar().encode( alt.X('sum(Datenart):N', title='Datenart'), alt.Y('sum(Anzahl):Q', title='Anzahl'), color='Datenart', tooltip='Anzahl:N' ) st.altair_chart(entities, use_container_width=True) else: with open(f"{path}/../stats/title_gnd_mean_{satzart[:2]}.csv", "r") as f: mean = str(round(float(f.read()),2)).replace('.',',') st.write(f'Durchschnittlich {mean} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz') #main st.title('GND-Dashboard') #infoebereich oben with st.beta_container(): st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser.') with st.beta_expander("Methodik und Datenherkunft"): st.markdown(''' Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen. Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden. 
Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html). Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert. Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden. Die Daten werden monatlich aktualisiert. ''') #sidebar mit satzartenfilter st.sidebar.header("Satzart wählen") satzart = st.sidebar.selectbox( "Über welche GND-Satzart möchten Sie etwas erfahren?", ('alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen") ) st.sidebar.info('Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie Christian Baumann geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.') gnd_allgemein = st.beta_container() with gnd_allgemein: st.header('GND Statistik allgemein') #allgemeine statistiken in abhängigkeit der satzart if satzart == 'alle': gesamt_entity_count() entities() newcomer() zeitverlauf() relationen() systematik() else: entities() newcomer() #besondere widgets für einzelne satzarten if satzart == "Tp - Personen": wirkungsorte() elif satzart == "Tg - Geografika": wirkungsorte_musik() wirkungsorte() elif satzart == "Ts - Sachbegriffe": sachbegriff_cloud() systematik_ts() dnb = st.beta_container() with dnb: st.header('GND in der Deutschen Nationalbibliothek') gnd_top() dnb_links() streamlit_analytics.stop_tracking()
50.71066
479
0.682382
2,661
19,980
5.016911
0.222097
0.021723
0.013483
0.013184
0.352584
0.299026
0.27191
0.246667
0.216704
0.202397
0
0.019279
0.174424
19,980
394
480
50.71066
0.78933
0.058809
0
0.246795
0
0.051282
0.426282
0.062457
0
0
0
0
0
1
0.041667
false
0
0.028846
0
0.092949
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
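A note on the dashboard/gnd-app.py record above: most of its charts follow one recipe, namely read a CSV, take the top N rows with nlargest behind a Streamlit slider, and feed them to an Altair bar chart. A small self-contained sketch of that recipe follows; the DataFrame contents here are made up, only the column names ('name', 'count') mirror the record.

# Sketch of the top-N bar-chart recipe the dashboard repeats.
import pandas as pd
import altair as alt

df = pd.DataFrame({"name": ["Berlin", "Leipzig", "Wien"], "count": [120, 80, 45]})
top_n = 2

chart = alt.Chart(df.nlargest(top_n, "count", keep="all")).mark_bar().encode(
    alt.X("name:N", sort="-y"),
    alt.Y("count:Q"),
    tooltip=["name", "count"],
)
# In the dashboard this chart would be rendered with
# st.altair_chart(chart, use_container_width=True).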
0a4800ab1d62e509adf8e4628718cf0758bb7bb5
3,189
py
Python
vize/150401052/sunucu.py
hasan-se/blm304
893d15282497a426ff96b0c8b6c77d57c406742e
[ "Unlicense" ]
1
2021-05-04T21:46:08.000Z
2021-05-04T21:46:08.000Z
vize/150401052/sunucu.py
hasan-se/blm304
893d15282497a426ff96b0c8b6c77d57c406742e
[ "Unlicense" ]
null
null
null
vize/150401052/sunucu.py
hasan-se/blm304
893d15282497a426ff96b0c8b6c77d57c406742e
[ "Unlicense" ]
null
null
null
# Erdin Alhas 150401052
# Simple UDP file server: supports directory listing ("listeleme yap"),
# upload ("put yap"), download ("get yap") and shutdown ("bitir").
import os
import sys
import time
from socket import *
from os import system, name

ip = '127.0.0.1'
port = 42

s_soket = socket(AF_INET, SOCK_DGRAM)
s_soket.bind((ip, port))
print("\nSunucu Hazir\n")  # "Server ready"

# Handshake: wait for the client's first datagram, then confirm readiness.
kontrol, istemciAdres = s_soket.recvfrom(4096)
s_soket.sendto(bytes("Sunucu hazir", encoding='utf-8'), istemciAdres)

# Receive the command from the client.
i, istemciAdres = s_soket.recvfrom(4096)

if(i.decode("utf-8") == "listeleme yap"):  # list the server directory
    dosyalar = "\n".join(os.listdir())
    s_soket.sendto(bytes(dosyalar, encoding='utf-8'), istemciAdres)
    sys.exit()

elif(i.decode("utf-8") == "put yap"):  # client uploads a file
    cevap = s_soket.recvfrom(4096)
    if(cevap[0].decode("utf-8") == "mevcut"):  # file exists on the client side
        dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)   # file name
        dosyaIcerigi = s_soket.recvfrom(4096)              # file content
        if(os.path.exists(dosyaIsmi.decode("utf-8")) == True):
            # Same name already on the server: ask whether to store a copy.
            s_soket.sendto(bytes("aynisi mevcut", encoding='utf-8'), istemciAdres)
            karar = s_soket.recvfrom(4096)
            if(karar[0].decode("utf-8") == "1"):
                yeniAd = dosyaIsmi.decode("utf-8")[:-4] + " (kopya)" + ".txt"
                dosyaYeni = open(yeniAd, "wb")
                dosyaYeni.write(dosyaIcerigi[0])
                dosyaYeni.close()
                print("\nPUT islemi basariyla gerceklesti..")  # PUT succeeded
        else:
            dosyaYeni = open(dosyaIsmi, "wb")
            dosyaYeni.write(dosyaIcerigi[0])
            dosyaYeni.close()
            s_soket.sendto(bytes("tamam", encoding='utf-8'), istemciAdres)
            print("\nPUT islemi basariyla gerceklesti..")  # PUT succeeded
    else:
        print("\nGirilen adda bir dosya istemcide bulunamadi..")  # no such file on the client

elif(i.decode("utf-8") == "get yap"):  # client downloads a file
    dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
    if (os.path.exists(dosyaIsmi.decode("utf-8")) == True):
        dosya = open(dosyaIsmi.decode("utf-8"), "rb")
        s_soket.sendto(bytes("dosya mevcut", encoding='utf-8'), istemciAdres)
        dosyaIcerik = dosya.read()
        dosya.close()
        s_soket.sendto(dosyaIcerik, istemciAdres)
        kontrol = s_soket.recvfrom(4096)
        print("\nGET islemi basariyla gerceklesti..")  # GET succeeded
        sys.exit()
    else:
        print("\n! Bu isimde bir dosya sunucuda mevcut değil")  # no such file on the server
        sys.exit()

elif(i.decode("utf-8") == "bitir"):  # shut the server down
    s_soket.close()
    print("\nSunucu kapandi")  # "Server stopped"
    sys.exit()
54.050847
107
0.444967
289
3,189
4.844291
0.311419
0.072857
0.071429
0.102857
0.396429
0.292857
0.171429
0.078571
0.078571
0.078571
0
0.039019
0.437441
3,189
59
108
54.050847
0.74136
0.006585
0
0.298246
0
0
0.134084
0
0
0
0
0
0
1
0
false
0
0.087719
0
0.087719
0.122807
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
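A note on the vize/150401052/sunucu.py record above: the server speaks a small UDP protocol in which the client first sends a greeting datagram, then one of the text commands "listeleme yap" (list directory), "put yap" (upload), "get yap" (download) or "bitir" (quit), followed by file-name and file-content datagrams where needed. Below is a hypothetical client-side sketch for the listing command only; it assumes the server above is already running on 127.0.0.1:42.

# Hypothetical client sketch for the "listeleme yap" (directory listing) command.
from socket import socket, AF_INET, SOCK_DGRAM

server = ('127.0.0.1', 42)
c = socket(AF_INET, SOCK_DGRAM)

c.sendto(b"merhaba", server)               # initial handshake datagram
print(c.recvfrom(4096)[0].decode())        # expects "Sunucu hazir" (server ready)

c.sendto("listeleme yap".encode('utf-8'), server)
listing, _ = c.recvfrom(4096)              # newline-separated file names
print(listing.decode('utf-8'))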
0a482fa1649b42a4ec4a6b713bc6b758170e2273
12,096
py
Python
httprunner/compat.py
panyuan209/httprunner
d90f2b9ab06963e8efa1c327975fca5296d6bc39
[ "Apache-2.0" ]
null
null
null
httprunner/compat.py
panyuan209/httprunner
d90f2b9ab06963e8efa1c327975fca5296d6bc39
[ "Apache-2.0" ]
null
null
null
httprunner/compat.py
panyuan209/httprunner
d90f2b9ab06963e8efa1c327975fca5296d6bc39
[ "Apache-2.0" ]
null
null
null
""" This module handles compatibility issues between testcase format v2 and v3. 解决httprunner2 和 3 之间测试用例兼容性问题 """ import os import sys from typing import List, Dict, Text, Union, Any from loguru import logger from httprunner import exceptions from httprunner.loader import load_project_meta, convert_relative_project_root_dir from httprunner.parser import parse_data from httprunner.utils import sort_dict_by_custom_order def convert_variables( raw_variables: Union[Dict, List, Text], test_path: Text ) -> Dict[Text, Any]: if isinstance(raw_variables, Dict): return raw_variables if isinstance(raw_variables, List): # [{"var1": 1}, {"var2": 2}] variables: Dict[Text, Any] = {} for var_item in raw_variables: if not isinstance(var_item, Dict) or len(var_item) != 1: raise exceptions.TestCaseFormatError( f"Invalid variables format: {raw_variables}" ) variables.update(var_item) return variables elif isinstance(raw_variables, Text): # get variables by function, e.g. ${get_variables()} project_meta = load_project_meta(test_path) variables = parse_data(raw_variables, {}, project_meta.functions) return variables else: raise exceptions.TestCaseFormatError( f"Invalid variables format: {raw_variables}" ) def _convert_jmespath(raw: Text) -> Text: if not isinstance(raw, Text): raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}") # content.xx/json.xx => body.xx if raw.startswith("content"): raw = f"body{raw[len('content'):]}" elif raw.startswith("json"): raw = f"body{raw[len('json'):]}" raw_list = [] for item in raw.split("."): if "-" in item: # add quotes for field with separator # e.g. headers.Content-Type => headers."Content-Type" item = item.strip('"') raw_list.append(f'"{item}"') elif item.isdigit(): # convert lst.0.name to lst[0].name if len(raw_list) == 0: logger.error(f"Invalid jmespath: {raw}") sys.exit(1) last_item = raw_list.pop() item = f"{last_item}[{item}]" raw_list.append(item) else: raw_list.append(item) return ".".join(raw_list) def _convert_extractors(extractors: Union[List, Dict]) -> Dict: """ convert extract list(v2) to dict(v3) Args: extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}] Returns: {"varA": "body.varA", "varB": "body.varB"} """ v3_extractors: Dict = {} if isinstance(extractors, List): # [{"varA": "content.varA"}, {"varB": "json.varB"}] for extractor in extractors: if not isinstance(extractor, Dict): logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in extractor.items(): v3_extractors[k] = v elif isinstance(extractors, Dict): # {"varA": "body.varA", "varB": "body.varB"} v3_extractors = extractors else: logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in v3_extractors.items(): v3_extractors[k] = _convert_jmespath(v) return v3_extractors def _convert_validators(validators: List) -> List: for v in validators: if "check" in v and "expect" in v: # format1: {"check": "content.abc", "assert": "eq", "expect": 201} v["check"] = _convert_jmespath(v["check"]) elif len(v) == 1: # format2: {'eq': ['status_code', 201]} comparator = list(v.keys())[0] v[comparator][0] = _convert_jmespath(v[comparator][0]) return validators def _sort_request_by_custom_order(request: Dict) -> Dict: custom_order = [ "method", "url", "params", "headers", "cookies", "data", "json", "files", "timeout", "allow_redirects", "proxies", "verify", "stream", "auth", "cert", ] return sort_dict_by_custom_order(request, custom_order) def _sort_step_by_custom_order(step: Dict) -> Dict: custom_order = [ "name", "variables", "request", "testcase", 
"setup_hooks", "teardown_hooks", "extract", "validate", "validate_script", ] return sort_dict_by_custom_order(step, custom_order) def _ensure_step_attachment(step: Dict) -> Dict: test_dict = { "name": step["name"], } if "variables" in step: test_dict["variables"] = step["variables"] if "setup_hooks" in step: test_dict["setup_hooks"] = step["setup_hooks"] if "teardown_hooks" in step: test_dict["teardown_hooks"] = step["teardown_hooks"] if "extract" in step: test_dict["extract"] = _convert_extractors(step["extract"]) if "export" in step: test_dict["export"] = step["export"] if "validate" in step: if not isinstance(step["validate"], List): raise exceptions.TestCaseFormatError( f'Invalid teststep validate: {step["validate"]}' ) test_dict["validate"] = _convert_validators(step["validate"]) if "validate_script" in step: test_dict["validate_script"] = step["validate_script"] return test_dict def ensure_testcase_v3_api(api_content: Dict) -> Dict: logger.info("convert api in v2 to testcase format v3") teststep = { "request": _sort_request_by_custom_order(api_content["request"]), } teststep.update(_ensure_step_attachment(api_content)) teststep = _sort_step_by_custom_order(teststep) config = {"name": api_content["name"]} extract_variable_names: List = list(teststep.get("extract", {}).keys()) if extract_variable_names: config["export"] = extract_variable_names return { "config": config, "teststeps": [teststep], } def ensure_testcase_v3(test_content: Dict) -> Dict: logger.info("ensure compatibility with testcase format v2") v3_content = {"config": test_content["config"], "teststeps": []} if "teststeps" not in test_content: logger.error(f"Miss teststeps: {test_content}") sys.exit(1) if not isinstance(test_content["teststeps"], list): logger.error( f'teststeps should be list type, got {type(test_content["teststeps"])}: {test_content["teststeps"]}' ) sys.exit(1) for step in test_content["teststeps"]: teststep = {} if "request" in step: teststep["request"] = _sort_request_by_custom_order(step.pop("request")) elif "api" in step: teststep["testcase"] = step.pop("api") elif "testcase" in step: teststep["testcase"] = step.pop("testcase") else: raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}") teststep.update(_ensure_step_attachment(step)) teststep = _sort_step_by_custom_order(teststep) v3_content["teststeps"].append(teststep) return v3_content def ensure_cli_args(args: List) -> List: """ ensure compatibility with deprecated cli args in v2 """ # remove deprecated --failfast if "--failfast" in args: logger.warning(f"remove deprecated argument: --failfast") args.pop(args.index("--failfast")) # convert --report-file to --html if "--report-file" in args: logger.warning(f"replace deprecated argument --report-file with --html") index = args.index("--report-file") args[index] = "--html" args.append("--self-contained-html") # keep compatibility with --save-tests in v2 if "--save-tests" in args: logger.warning( f"generate conftest.py keep compatibility with --save-tests in v2" ) args.pop(args.index("--save-tests")) _generate_conftest_for_summary(args) return args def _generate_conftest_for_summary(args: List): for arg in args: if os.path.exists(arg): test_path = arg # FIXME: several test paths maybe specified break else: logger.error(f"No valid test path specified! \nargs: {args}") sys.exit(1) conftest_content = '''# NOTICE: Generated By HttpRunner. 
import json import os import time import pytest from loguru import logger from httprunner.utils import get_platform, ExtendJSONEncoder @pytest.fixture(scope="session", autouse=True) def session_fixture(request): """setup and teardown each task""" logger.info(f"start running testcases ...") start_at = time.time() yield logger.info(f"task finished, generate task summary for --save-tests") summary = { "success": True, "stat": { "testcases": {"total": 0, "success": 0, "fail": 0}, "teststeps": {"total": 0, "failures": 0, "successes": 0}, }, "time": {"start_at": start_at, "duration": time.time() - start_at}, "platform": get_platform(), "details": [], } for item in request.node.items: testcase_summary = item.instance.get_summary() summary["success"] &= testcase_summary.success summary["stat"]["testcases"]["total"] += 1 summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas) if testcase_summary.success: summary["stat"]["testcases"]["success"] += 1 summary["stat"]["teststeps"]["successes"] += len( testcase_summary.step_datas ) else: summary["stat"]["testcases"]["fail"] += 1 summary["stat"]["teststeps"]["successes"] += ( len(testcase_summary.step_datas) - 1 ) summary["stat"]["teststeps"]["failures"] += 1 testcase_summary_json = testcase_summary.dict() testcase_summary_json["records"] = testcase_summary_json.pop("step_datas") summary["details"].append(testcase_summary_json) summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}" summary_dir = os.path.dirname(summary_path) os.makedirs(summary_dir, exist_ok=True) with open(summary_path, "w", encoding="utf-8") as f: json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder) logger.info(f"generated task summary: {summary_path}") ''' project_meta = load_project_meta(test_path) project_root_dir = project_meta.RootDir conftest_path = os.path.join(project_root_dir, "conftest.py") test_path = os.path.abspath(test_path) logs_dir_path = os.path.join(project_root_dir, "logs") test_path_relative_path = convert_relative_project_root_dir(test_path) if os.path.isdir(test_path): file_foder_path = os.path.join(logs_dir_path, test_path_relative_path) dump_file_name = "all.summary.json" else: file_relative_folder_path, test_file = os.path.split(test_path_relative_path) file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path) test_file_name, _ = os.path.splitext(test_file) dump_file_name = f"{test_file_name}.summary.json" summary_path = os.path.join(file_foder_path, dump_file_name) conftest_content = conftest_content.replace( "{{SUMMARY_PATH_PLACEHOLDER}}", summary_path ) dir_path = os.path.dirname(conftest_path) if not os.path.exists(dir_path): os.makedirs(dir_path) with open(conftest_path, "w", encoding="utf-8") as f: f.write(conftest_content) logger.info("generated conftest.py to generate summary.json") def ensure_path_sep(path: Text) -> Text: """ ensure compatibility with different path separators of Linux and Windows """ if "/" in path: path = os.sep.join(path.split("/")) if "\\" in path: path = os.sep.join(path.split("\\")) return path
30.315789
112
0.61789
1,438
12,096
4.997914
0.176634
0.019897
0.016279
0.011688
0.269514
0.200362
0.132183
0.078753
0.068735
0.041185
0
0.006964
0.252149
12,096
398
113
30.39196
0.78753
0.081019
0
0.123188
0
0
0.333424
0.088195
0
0
0
0.002513
0
1
0.043478
false
0
0.050725
0
0.141304
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
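A note on the httprunner/compat.py record above: the heart of the v2-to-v3 conversion is rewriting extractor expressions such as "content.abc", "json.token.0" or "headers.Content-Type" into jmespath rooted at "body". The sketch below is a simplified, standalone re-implementation of that rewrite for illustration; the real logic lives in httprunner's _convert_jmespath.

# Simplified re-implementation of the jmespath rewrite described above.
def convert_jmespath(raw: str) -> str:
    if raw.startswith("content"):
        raw = "body" + raw[len("content"):]
    elif raw.startswith("json"):
        raw = "body" + raw[len("json"):]

    out = []
    for item in raw.split("."):
        if "-" in item:                   # headers.Content-Type -> headers."Content-Type"
            out.append('"%s"' % item.strip('"'))
        elif item.isdigit() and out:      # lst.0.name -> lst[0].name
            out.append("%s[%s]" % (out.pop(), item))
        else:
            out.append(item)
    return ".".join(out)


assert convert_jmespath("content.abc") == "body.abc"
assert convert_jmespath("json.token.0") == "body.token[0]"
assert convert_jmespath("headers.Content-Type") == 'headers."Content-Type"'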
0a49026066247a3018418704bbd8ff4e56b70f26
2,888
py
Python
examples/demo/basic/scatter.py
ContinuumIO/chaco
e4a42b91cb25ef7191fd465caaef2c3256fc668e
[ "BSD-3-Clause" ]
3
2017-09-17T17:32:06.000Z
2022-03-15T13:04:43.000Z
examples/demo/basic/scatter.py
ContinuumIO/chaco
e4a42b91cb25ef7191fd465caaef2c3256fc668e
[ "BSD-3-Clause" ]
null
null
null
examples/demo/basic/scatter.py
ContinuumIO/chaco
e4a42b91cb25ef7191fd465caaef2c3256fc668e
[ "BSD-3-Clause" ]
5
2015-05-17T16:08:11.000Z
2021-02-23T09:23:42.000Z
""" Scatter plot with panning and zooming Shows a scatter plot of a set of random points, with basic Chaco panning and zooming. Interacting with the plot: - Left-mouse-drag pans the plot. - Mouse wheel up and down zooms the plot in and out. - Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow and alt-right-arrow moves you forwards and backwards through the "zoom history". """ # Major library imports from numpy import sort from numpy.random import random # Enthought library imports from enable.api import Component, ComponentEditor from traits.api import HasTraits, Instance from traitsui.api import Item, Group, View # Chaco imports from chaco.api import ArrayPlotData, Plot from chaco.tools.api import PanTool, ZoomTool #=============================================================================== # # Create the Chaco plot. #=============================================================================== def _create_plot_component(): # Create some data numpts = 5000 x = sort(random(numpts)) y = random(numpts) # Create a plot data obect and give it this data pd = ArrayPlotData() pd.set_data("index", x) pd.set_data("value", y) # Create the plot plot = Plot(pd) plot.plot(("index", "value"), type="scatter", marker="circle", index_sort="ascending", color="orange", marker_size=3, bgcolor="white") # Tweak some of the plot properties plot.title = "Scatter Plot" plot.line_width = 0.5 plot.padding = 50 # Attach some tools to the plot plot.tools.append(PanTool(plot, constrain_key="shift")) zoom = ZoomTool(component=plot, tool_mode="box", always_on=False) plot.overlays.append(zoom) return plot #=============================================================================== # Attributes to use for the plot view. size = (650, 650) title = "Basic scatter plot" bg_color="lightgray" #=============================================================================== # # Demo class that is used by the demo.py application. #=============================================================================== class Demo(HasTraits): plot = Instance(Component) traits_view = View( Group( Item('plot', editor=ComponentEditor(size=size, bgcolor=bg_color), show_label=False), orientation = "vertical"), resizable=True, title=title ) def _plot_default(self): return _create_plot_component() demo = Demo() if __name__ == "__main__": demo.configure_traits() #--EOF---
29.773196
80
0.541205
320
2,888
4.79375
0.45
0.031943
0.022164
0
0
0
0
0
0
0
0
0.006878
0.244806
2,888
96
81
30.083333
0.69647
0.421053
0
0
0
0
0.072904
0
0
0
0
0
0
1
0.042553
false
0
0.148936
0.021277
0.297872
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0

0a4908d9ecd7f27856ad9555eafa94debe8ca0ea
1,347
py
Python
webstr/core/config.py
fbalak/webstr
7c7e552fb9943bf664b94ca75a88747c0b243722
[ "Apache-2.0" ]
3
2017-03-01T11:51:12.000Z
2018-04-16T13:09:56.000Z
webstr/core/config.py
fbalak/webstr
7c7e552fb9943bf664b94ca75a88747c0b243722
[ "Apache-2.0" ]
null
null
null
webstr/core/config.py
fbalak/webstr
7c7e552fb9943bf664b94ca75a88747c0b243722
[ "Apache-2.0" ]
1
2018-04-16T13:09:34.000Z
2018-04-16T13:09:34.000Z
""" Central configuration module of webstr selenium tests. This module provides configuration options along with default values and function to redefine values. """ # Copyright 2016 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys SELENIUM_LOG_LEVEL = logging.INFO SCHEME = 'https' PORT = 443 BROWSER = 'Firefox' BROWSER_VERSION = '' BROWSER_PLATFORM = 'ANY' SELENIUM_SERVER = None SELENIUM_PORT = 4444 BROWSER_WIDTH = 1280 BROWSER_HEIGHT = 1024 def update_value(key_name, value, force=False): """ Update single value of this config module. """ this_module = sys.modules[__name__] key_name = key_name.upper() # raise AttributeError if we try to define new value (unless force is used) if not force: getattr(this_module, key_name) setattr(this_module, key_name, value)
27.489796
79
0.746845
195
1,347
5.051282
0.6
0.060914
0.026396
0.032487
0
0
0
0
0
0
0
0.020871
0.181886
1,347
48
80
28.0625
0.872958
0.60876
0
0
0
0
0.03055
0
0
0
0
0
0
1
0.055556
false
0
0.111111
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a4957ae4c91cc14cfa8216c87afffecedc2a26e
641
py
Python
operations/mutations/mutation.py
PiotrBosowski/feat-genes
8e6604fd4e121022f8ac988d9b56985de01b8331
[ "MIT" ]
null
null
null
operations/mutations/mutation.py
PiotrBosowski/feat-genes
8e6604fd4e121022f8ac988d9b56985de01b8331
[ "MIT" ]
null
null
null
operations/mutations/mutation.py
PiotrBosowski/feat-genes
8e6604fd4e121022f8ac988d9b56985de01b8331
[ "MIT" ]
null
null
null
import random


class Mutation:
    def __init__(self, chrom_mut_chance, gen_mut_chance):
        self.chrom_mut_chance = chrom_mut_chance
        self.gen_mut_chance = gen_mut_chance

    def __call__(self, population):
        chroms_to_mutate = random.sample(
            population,
            round(self.chrom_mut_chance * len(population)))
        for chrom in chroms_to_mutate:
            genes_to_mutate = random.sample(
                range(len(chrom)),
                round(self.gen_mut_chance * len(chrom)))
            for gt in genes_to_mutate:
                chrom[gt] = int(not bool(chrom[gt]))
        return population
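A short usage sketch for the Mutation operator above. The bit-string population is an illustrative assumption; the class only needs indexable chromosomes holding 0/1 values.

import random

random.seed(0)  # reproducible illustration only

# population of 10 chromosomes, each a list of 8 bits
population = [[random.randint(0, 1) for _ in range(8)] for _ in range(10)]

# mutate ~50% of the chromosomes, flipping ~25% of the genes in each
mutate = Mutation(chrom_mut_chance=0.5, gen_mut_chance=0.25)
population = mutate(population)  # chromosomes are modified in place and returned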
32.05
59
0.620905
80
641
4.575
0.35
0.196721
0.153005
0.147541
0.114754
0
0
0
0
0
0
0
0.299532
641
19
60
33.736842
0.815145
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.0625
0
0.3125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a498f8f754b453bd4fdad3c6f6282e67b1ff4ac
1,551
py
Python
examples/CountLettersInList.py
Ellis0817/Introduction-to-Programming-Using-Python
1882a2a846162d5ff56d4d56c3940b638ef408bd
[ "MIT" ]
null
null
null
examples/CountLettersInList.py
Ellis0817/Introduction-to-Programming-Using-Python
1882a2a846162d5ff56d4d56c3940b638ef408bd
[ "MIT" ]
4
2019-11-07T12:32:19.000Z
2020-07-19T14:04:44.000Z
examples/CountLettersInList.py
Ellis0817/Introduction-to-Programming-Using-Python
1882a2a846162d5ff56d4d56c3940b638ef408bd
[ "MIT" ]
5
2019-12-04T15:56:55.000Z
2022-01-14T06:19:18.000Z
import RandomCharacter  # Defined in Listing 6.9


def main():
    """Main."""
    # Create a list of characters
    chars = createList()

    # Display the list
    print("The lowercase letters are:")
    displayList(chars)

    # Count the occurrences of each letter
    counts = countLetters(chars)

    # Display counts
    print("The occurrences of each letter are:")
    displayCounts(counts)


def createList():
    """Create a list of characters."""
    # Create an empty list
    chars = []

    # Create lowercase letters randomly and add them to the list
    for i in range(100):
        chars.append(RandomCharacter.getRandomLowerCaseLetter())

    # Return the list
    return chars


def displayList(chars):
    """Display the list of characters."""
    # Display the characters in the list 20 on each line
    for i in range(len(chars)):
        if (i + 1) % 20 == 0:
            print(chars[i])
        else:
            print(chars[i], end=' ')


def countLetters(chars):
    """Count the occurrences of each letter."""
    # Create a list of 26 integers with initial value 0
    counts = 26 * [0]

    # For each lowercase letter in the list, count it
    for i in range(len(chars)):
        counts[ord(chars[i]) - ord('a')] += 1

    return counts


def displayCounts(counts):
    """Display counts."""
    for i in range(len(counts)):
        if (i + 1) % 10 == 0:
            print(counts[i], chr(i + ord('a')))
        else:
            print(counts[i], chr(i + ord('a')), end=' ')
    print()


main()  # Call the main function
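The listing above imports a RandomCharacter module ("Listing 6.9") that is not part of this record. A minimal stand-in that satisfies the single call used here could look like the sketch below; it is an assumption, not the book's original listing.

# RandomCharacter.py -- hypothetical stand-in for the imported Listing 6.9
import random


def getRandomCharacter(ch1, ch2):
    """Return a random character between ch1 and ch2, inclusive."""
    return chr(random.randint(ord(ch1), ord(ch2)))


def getRandomLowerCaseLetter():
    """Return a random lowercase letter."""
    return getRandomCharacter('a', 'z')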
23.149254
64
0.597679
204
1,551
4.544118
0.313725
0.045307
0.02589
0.047465
0.254585
0.161812
0.12082
0
0
0
0
0.019731
0.281109
1,551
66
65
23.5
0.811659
0.328175
0
0.125
0
0
0.065672
0
0
0
0
0
0
1
0.15625
false
0
0.03125
0
0.25
0.21875
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a49e1637a3ffcd5ae7b64809f0205d8b48bfcf6
627
py
Python
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
kokosing/hue
2307f5379a35aae9be871e836432e6f45138b3d9
[ "Apache-2.0" ]
5,079
2015-01-01T03:39:46.000Z
2022-03-31T07:38:22.000Z
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
zks888/hue
93a8c370713e70b216c428caa2f75185ef809deb
[ "Apache-2.0" ]
1,623
2015-01-01T08:06:24.000Z
2022-03-30T19:48:52.000Z
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
zks888/hue
93a8c370713e70b216c428caa2f75185ef809deb
[ "Apache-2.0" ]
2,033
2015-01-04T07:18:02.000Z
2022-03-28T19:55:47.000Z
from openid.consumer.discover import OpenIDServiceEndpoint
import datadriven


class BadLinksTestCase(datadriven.DataDrivenTestCase):
    cases = [
        '',
        "http://not.in.a.link.tag/",
        '<link rel="openid.server" href="not.in.html.or.head" />',
    ]

    def __init__(self, data):
        datadriven.DataDrivenTestCase.__init__(self, data)
        self.data = data

    def runOneTest(self):
        actual = OpenIDServiceEndpoint.fromHTML('http://unused.url/', self.data)
        expected = []
        self.failUnlessEqual(expected, actual)


def pyUnitTests():
    return datadriven.loadTests(__name__)
28.5
80
0.660287
63
627
6.380952
0.603175
0.079602
0.059701
0
0
0
0
0
0
0
0
0
0.212121
627
21
81
29.857143
0.813765
0
0
0
0
0.058824
0.1563
0.041467
0
0
0
0
0
1
0.176471
false
0
0.117647
0.058824
0.470588
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a4c02948fcf1ba6f4a5b3cae666b7bb9cd4c29a
4,508
py
Python
src/command/voice_log/chart.py
link1345/Vol-GameClanTools-DiscordBot
c3349f38d59cba59161b8c54c172e39ba873c53d
[ "MIT" ]
null
null
null
src/command/voice_log/chart.py
link1345/Vol-GameClanTools-DiscordBot
c3349f38d59cba59161b8c54c172e39ba873c53d
[ "MIT" ]
25
2021-08-11T13:02:18.000Z
2021-08-20T23:24:19.000Z
src/command/voice_log/chart.py
link1345/Vol-GameClanTools-DiscordBot
c3349f38d59cba59161b8c54c172e39ba873c53d
[ "MIT" ]
null
null
null
import discord
import os
import json
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from pprint import pprint

import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting


def most_old_Month():
    old_month = 1
    labels = []
    fileNameList = []
    while True:
        filetime = datetime.datetime.today() - relativedelta(months=old_month)
        m_month = datetime.datetime.strftime(filetime, '%m')
        m_year = datetime.datetime.strftime(filetime, '%Y')
        filename = CSetting.baseLogFolder + CSetting.JSONPATH_row + m_year + m_month + ".json"
        if not os.path.exists(filename):
            old_month -= 1  # old_month was probed one step too far, so the real value is this minus 1
            break
        labels.append(m_year + "/" + m_month)
        fileNameList.append(filename)
        old_month += 1
    return old_month, labels, fileNameList


async def makeOldTimeList(client: discord.Client, MonthFileList: list[str], IndexLabel: list[str],
                          RoleList: list[int] = CSetting.OneMonthOutput_RoleID):
    all_df = None
    for fileName in MonthFileList:
        df = await makeTimeList(client, Datafile_path=fileName, RoleList=RoleList)
        # print("test1")
        pprint(df)
        if df is None:
            break
        labelname = IndexLabel[MonthFileList.index(fileName)]
        df = df.rename(columns={'time': labelname})
        if MonthFileList.index(fileName) == 0:
            all_df = df
        else:
            df = df.drop(columns=['name'])
            all_df = pd.merge(all_df, df, left_index=True, right_index=True)
            # all_df = pd.merge(all_df, df, left_index=True)
            # df.loc[:, [labelname]]
    # pprint(all_df)
    return all_df


async def UserRoleMember(client: discord.Client, RoleList: list[int]):
    """
    [VC] Collect the members that belong to the specified roles.

    Args:
        client (discord.Client): client
        RoleList (list[int]): role IDs

    return:
        list[discord.Member]: members that belong to the specified roles
    """
    data = []
    for guild_item in client.guilds:
        # refresh the guild data
        await guild_item.chunk()
        # without a role restriction, take every member
        if len(RoleList) == 0:
            data += guild_item.members
            continue
        # otherwise, take the members of the matching roles
        for role_item in guild_item.roles:
            if role_item.id in RoleList:
                data += role_item.members
    return data


async def makeTimeList(client: discord.Client, Datafile_path: str, RoleList: list[int]):
    """
    [VC] Aggregate the raw log data and return it as a table.

    Args:
        client (discord.Client): client
        RoleList (list[int]): role IDs
        mode (string): what identifies a user? (UserName or ID)

    return:
        pd.DataFrame: aggregated data
    """
    # fetch the user list
    members = await UserRoleMember(client, RoleList)

    # extract only the IDs
    def getID(members: list[discord.Member]):
        IDlist = []
        Namelist = []
        for member in members:
            IDlist.append(member.id)
            Namelist.append(member.name + "#" + member.discriminator)
        return IDlist, Namelist

    members_IDlist, members_Namelist = getID(members=members)
    if members_IDlist is None or members_IDlist == []:
        return None

    # load the JSON log
    orig_TimeData: dict
    try:
        with open(Datafile_path) as f:
            orig_TimeData = json.load(f)
    except Exception:
        CPrint.error_print("Not a JSON file")
        import traceback
        traceback.print_exc()
        return None
    if orig_TimeData is None:
        return None

    # df = pd.DataFrame({
    #     'start': [None, None],
    #     'end': [None, None],
    #     'time': [13, 23]},
    #     index=['ONE', 'TWO']
    # )
    df_dict = {
        'name': members_Namelist,
        'start': [None] * len(members),
        'exit': [None] * len(members),
        'time': [0.0] * len(members),
    }

    # accumulate the time per member
    for item in orig_TimeData:
        try:
            indexNum = members_IDlist.index(item["member.id"])
        except ValueError:
            # skip users that no longer exist in the current guild
            continue
        if item["Flag"] == "entry":
            df_dict["start"][indexNum] = item["time"]
        if item["Flag"] == "exit":
            # an exit without a matching entry
            if df_dict["start"][indexNum] is None:
                # for now, treat it as having entered at the start of the month
                # (other options, e.g. treating it as no entry at all, are still under consideration)
                tmp_startTime = datetime.datetime.now().strftime("%Y/%m/01 00:00:00")
                df_dict["start"][indexNum] = tmp_startTime
            # --
            df_dict["exit"][indexNum] = item["time"]
            # compute the difference
            a_time = datetime.datetime.strptime(df_dict["start"][indexNum], '%Y/%m/%d %H:%M:%S')
            b_time = datetime.datetime.strptime(df_dict["exit"][indexNum], '%Y/%m/%d %H:%M:%S')
            time: float = (b_time - a_time).total_seconds()
            # print("time : " + str(time))
            if time < 0.0:
                df_dict["time"][indexNum] += 0.0
            else:
                df_dict["time"][indexNum] += time

    # convert to a DataFrame
    df = pd.DataFrame(df_dict, index=members_IDlist)
    # drop the working columns 'start' and 'exit'
    df = df.drop(columns=['start', 'exit'])
    # seconds -> hours
    df["time"] = df["time"] / 60 / 60
    # pprint(df)
    return df
23.479167
154
0.675244
593
4,508
5.016863
0.306914
0.020168
0.031933
0.025546
0.08605
0.08605
0.063193
0.053782
0.053782
0.021513
0
0.008139
0.182343
4,508
191
155
23.602094
0.798969
0.110248
0
0.104762
0
0
0.050894
0
0
0
0
0
0
1
0.019048
false
0
0.095238
0
0.190476
0.038095
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a4c9bc797a43e5add896c8bde8af43dfb42905c
23,511
py
Python
python/src/vmaf/core/feature_extractor.py
jayholman/vmaf
0bba4faf68ab89e38314cc596e6908b4fb83984d
[ "Apache-2.0" ]
40
2019-07-04T06:22:10.000Z
2022-03-10T14:49:33.000Z
python/src/vmaf/core/feature_extractor.py
jayholman/vmaf
0bba4faf68ab89e38314cc596e6908b4fb83984d
[ "Apache-2.0" ]
null
null
null
python/src/vmaf/core/feature_extractor.py
jayholman/vmaf
0bba4faf68ab89e38314cc596e6908b4fb83984d
[ "Apache-2.0" ]
8
2019-08-20T08:14:41.000Z
2021-11-18T07:01:19.000Z
from abc import ABCMeta, abstractmethod import os from vmaf.tools.misc import make_absolute_path, run_process from vmaf.tools.stats import ListStats __copyright__ = "Copyright 2016-2018, Netflix, Inc." __license__ = "Apache, Version 2.0" import re import numpy as np import ast from vmaf import ExternalProgramCaller, to_list from vmaf.config import VmafConfig, VmafExternalConfig from vmaf.core.executor import Executor from vmaf.core.result import Result from vmaf.tools.reader import YuvReader class FeatureExtractor(Executor): """ FeatureExtractor takes in a list of assets, and run feature extraction on them, and return a list of corresponding results. A FeatureExtractor must specify a unique type and version combination (by the TYPE and VERSION attribute), so that the Result generated by it can be identified. A derived class of FeatureExtractor must: 1) Override TYPE and VERSION 2) Override _generate_result(self, asset), which call a command-line executable and generate feature scores in a log file. 3) Override _get_feature_scores(self, asset), which read the feature scores from the log file, and return the scores in a dictionary format. For an example, follow VmafFeatureExtractor. """ __metaclass__ = ABCMeta @property @abstractmethod def ATOM_FEATURES(self): raise NotImplementedError def _read_result(self, asset): result = {} result.update(self._get_feature_scores(asset)) executor_id = self.executor_id return Result(asset, executor_id, result) @classmethod def get_scores_key(cls, atom_feature): return "{type}_{atom_feature}_scores".format( type=cls.TYPE, atom_feature=atom_feature) @classmethod def get_score_key(cls, atom_feature): return "{type}_{atom_feature}_score".format( type=cls.TYPE, atom_feature=atom_feature) def _get_feature_scores(self, asset): # routine to read the feature scores from the log file, and return # the scores in a dictionary format. log_file_path = self._get_log_file_path(asset) atom_feature_scores_dict = {} atom_feature_idx_dict = {} for atom_feature in self.ATOM_FEATURES: atom_feature_scores_dict[atom_feature] = [] atom_feature_idx_dict[atom_feature] = 0 with open(log_file_path, 'rt') as log_file: for line in log_file.readlines(): for atom_feature in self.ATOM_FEATURES: re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature) mo = re.match(re_template, line) if mo: cur_idx = int(mo.group(1)) assert cur_idx == atom_feature_idx_dict[atom_feature] # parse value, allowing NaN and inf val = float(mo.group(2)) if np.isnan(val) or np.isinf(val): val = None atom_feature_scores_dict[atom_feature].append(val) atom_feature_idx_dict[atom_feature] += 1 continue len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]]) assert len_score != 0 for atom_feature in self.ATOM_FEATURES[1:]: assert len_score == len(atom_feature_scores_dict[atom_feature]), \ "Feature data possibly corrupt. Run cleanup script and try again." 
feature_result = {} for atom_feature in self.ATOM_FEATURES: scores_key = self.get_scores_key(atom_feature) feature_result[scores_key] = atom_feature_scores_dict[atom_feature] return feature_result class VmafFeatureExtractor(FeatureExtractor): TYPE = "VMAF_feature" # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 VERSION = '0.2.4c' # Modify by moving motion2 to c code ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2', 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr', 'vif_num_scale0', 'vif_den_scale0', 'vif_num_scale1', 'vif_den_scale1', 'vif_num_scale2', 'vif_den_scale2', 'vif_num_scale3', 'vif_den_scale3', 'adm_num_scale0', 'adm_den_scale0', 'adm_num_scale1', 'adm_den_scale1', 'adm_num_scale2', 'adm_den_scale2', 'adm_num_scale3', 'adm_den_scale3', ] DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'vif2', 'adm2', 'adm3', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VmafFeatureExtractor, cls)._post_process_result(result) # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) adm2_scores_key = cls.get_scores_key('adm2') adm_num_scores_key = cls.get_scores_key('adm_num') adm_den_scores_key = cls.get_scores_key('adm_den') result.result_dict[adm2_scores_key] = list( (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) / (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT) ) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0') vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0') vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1') vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1') vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2') vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2') vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3') vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3') vif_scale0_scores_key = cls.get_scores_key('vif_scale0') vif_scale1_scores_key = cls.get_scores_key('vif_scale1') vif_scale2_scores_key = cls.get_scores_key('vif_scale2') vif_scale3_scores_key = cls.get_scores_key('vif_scale3') result.result_dict[vif_scale0_scores_key] = list( 
(np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) ) result.result_dict[vif_scale1_scores_key] = list( (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) ) result.result_dict[vif_scale2_scores_key] = list( (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) ) result.result_dict[vif_scale3_scores_key] = list( (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) # vif2 = # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) + # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0 vif_scores_key = cls.get_scores_key('vif2') result.result_dict[vif_scores_key] = list( ( (np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) + (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) + (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) + (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) / 4.0 ) # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3 adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0') adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0') adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1') adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1') adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2') adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2') adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3') adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3') adm_scale0_scores_key = cls.get_scores_key('adm_scale0') adm_scale1_scores_key = cls.get_scores_key('adm_scale1') adm_scale2_scores_key = cls.get_scores_key('adm_scale2') adm_scale3_scores_key = cls.get_scores_key('adm_scale3') result.result_dict[adm_scale0_scores_key] = list( (np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale1_scores_key] = list( (np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale2_scores_key] = list( (np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale3_scores_key] = list( (np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT) ) # adm3 = \ # (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0 adm3_scores_key = cls.get_scores_key('adm3') result.result_dict[adm3_scores_key] = list( ( ((np.array(result.result_dict[adm_num_scale0_scores_key]) + 
cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)) ) / 4.0 ) # validate for feature in cls.DERIVED_ATOM_FEATURES: assert cls.get_scores_key(feature) in result.result_dict return result class VifFrameDifferenceFeatureExtractor(FeatureExtractor): TYPE = "VifDiff_feature" VERSION = '0.1' ATOM_FEATURES = ['vifdiff', 'vifdiff_num', 'vifdiff_den', 'vifdiff_num_scale0', 'vifdiff_den_scale0', 'vifdiff_num_scale1', 'vifdiff_den_scale1', 'vifdiff_num_scale2', 'vifdiff_den_scale2', 'vifdiff_num_scale3', 'vifdiff_den_scale3', ] DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result) # vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3 vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0') vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0') vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1') vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1') vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2') vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2') vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3') vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3') vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0') vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1') vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2') vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3') result.result_dict[vifdiff_scale0_scores_key] = list( (np.array(result.result_dict[vifdiff_num_scale0_scores_key]) / np.array(result.result_dict[vifdiff_den_scale0_scores_key])) ) result.result_dict[vifdiff_scale1_scores_key] = list( (np.array(result.result_dict[vifdiff_num_scale1_scores_key]) / np.array(result.result_dict[vifdiff_den_scale1_scores_key])) ) result.result_dict[vifdiff_scale2_scores_key] = list( (np.array(result.result_dict[vifdiff_num_scale2_scores_key]) / np.array(result.result_dict[vifdiff_den_scale2_scores_key])) ) result.result_dict[vifdiff_scale3_scores_key] = list( (np.array(result.result_dict[vifdiff_num_scale3_scores_key]) / np.array(result.result_dict[vifdiff_den_scale3_scores_key])) ) 
# validate for feature in cls.DERIVED_ATOM_FEATURES: assert cls.get_scores_key(feature) in result.result_dict return result class PsnrFeatureExtractor(FeatureExtractor): TYPE = "PSNR_feature" VERSION = "1.0" ATOM_FEATURES = ['psnr'] def _generate_result(self, asset): # routine to call the command-line executable and generate quality # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) class MomentFeatureExtractor(FeatureExtractor): TYPE = "Moment_feature" # VERSION = "1.0" # call executable VERSION = "1.1" # python only ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ] DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ] def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_w, quality_h = asset.quality_width_height ref_scores_mtx = None with YuvReader(filepath=asset.ref_workfile_path, width=quality_w, height=quality_h, yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader: scores_mtx_list = [] i = 0 for ref_yuv in ref_yuv_reader: ref_y = ref_yuv[0] firstm = ref_y.mean() secondm = ref_y.var() + firstm**2 scores_mtx_list.append(np.hstack(([firstm], [secondm]))) i += 1 ref_scores_mtx = np.vstack(scores_mtx_list) dis_scores_mtx = None with YuvReader(filepath=asset.dis_workfile_path, width=quality_w, height=quality_h, yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader: scores_mtx_list = [] i = 0 for dis_yuv in dis_yuv_reader: dis_y = dis_yuv[0] firstm = dis_y.mean() secondm = dis_y.var() + firstm**2 scores_mtx_list.append(np.hstack(([firstm], [secondm]))) i += 1 dis_scores_mtx = np.vstack(scores_mtx_list) assert ref_scores_mtx is not None and dis_scores_mtx is not None log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(), 'dis_scores_mtx': dis_scores_mtx.tolist()} log_file_path = self._get_log_file_path(asset) with open(log_file_path, 'wt') as log_file: log_file.write(str(log_dict)) def _get_feature_scores(self, asset): # routine to read the feature scores from the log file, and return # the scores in a dictionary format. 
log_file_path = self._get_log_file_path(asset) with open(log_file_path, 'rt') as log_file: log_str = log_file.read() log_dict = ast.literal_eval(log_str) ref_scores_mtx = np.array(log_dict['ref_scores_mtx']) dis_scores_mtx = np.array(log_dict['dis_scores_mtx']) _, num_ref_features = ref_scores_mtx.shape assert num_ref_features == 2 # ref1st, ref2nd _, num_dis_features = dis_scores_mtx.shape assert num_dis_features == 2 # dis1st, dis2nd feature_result = {} feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0]) feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1]) feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0]) feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1]) return feature_result @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(MomentFeatureExtractor, cls)._post_process_result(result) # calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd refvar_scores_key = cls.get_scores_key('refvar') ref1st_scores_key = cls.get_scores_key('ref1st') ref2nd_scores_key = cls.get_scores_key('ref2nd') disvar_scores_key = cls.get_scores_key('disvar') dis1st_scores_key = cls.get_scores_key('dis1st') dis2nd_scores_key = cls.get_scores_key('dis2nd') get_var = lambda m: m[1] - m[0] * m[0] result.result_dict[refvar_scores_key] = \ to_list(map(get_var, zip(result.result_dict[ref1st_scores_key], result.result_dict[ref2nd_scores_key]))) result.result_dict[disvar_scores_key] = \ to_list(map(get_var, zip(result.result_dict[dis1st_scores_key], result.result_dict[dis2nd_scores_key]))) # validate for feature in cls.DERIVED_ATOM_FEATURES: assert cls.get_scores_key(feature) in result.result_dict return result class SsimFeatureExtractor(FeatureExtractor): TYPE = "SSIM_feature" # VERSION = "1.0" VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0 ATOM_FEATURES = ['ssim', 'ssim_l', 'ssim_c', 'ssim_s'] def _generate_result(self, asset): # routine to call the command-line executable and generate quality # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) class MsSsimFeatureExtractor(FeatureExtractor): TYPE = "MS_SSIM_feature" # VERSION = "1.0" VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0 ATOM_FEATURES = ['ms_ssim', 'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0', 'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1', 'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2', 'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3', 'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4', ] def _generate_result(self, asset): # routine to call the command-line executable and generate quality # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
44.02809
146
0.663562
3,134
23,511
4.554882
0.087428
0.105919
0.055482
0.052539
0.716848
0.672644
0.601191
0.565254
0.443923
0.41289
0
0.019697
0.246395
23,511
533
147
44.110694
0.78598
0.138616
0
0.288043
0
0
0.092788
0.002732
0
0
0
0
0.024457
1
0.040761
false
0
0.032609
0.005435
0.184783
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a4f114d5336abdf79c1eeb8751aaf58a158b4d8
1,382
py
Python
transformerquant/modules/attention/multi_head.py
StateOfTheArt-quant/transformerquant
f6775d7aa920b84908b0a09d9ba098b1fe87bdff
[ "Apache-2.0" ]
22
2019-11-02T12:00:38.000Z
2022-02-16T08:00:36.000Z
transformerquant/modules/attention/multi_head.py
StateOfTheArt-quant/transformerquant
f6775d7aa920b84908b0a09d9ba098b1fe87bdff
[ "Apache-2.0" ]
null
null
null
transformerquant/modules/attention/multi_head.py
StateOfTheArt-quant/transformerquant
f6775d7aa920b84908b0a09d9ba098b1fe87bdff
[ "Apache-2.0" ]
6
2020-04-19T08:10:03.000Z
2021-12-07T05:59:46.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch.nn as nn

from .single import attention


class MultiHeadedAttention(nn.Module):

    def __init__(self, d_model, nhead, dropout=0.1):
        super().__init__()
        assert d_model % nhead == 0

        # we assume d_v always equal d_k
        self.d_k = d_model // nhead
        self.nhead = nhead

        self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
        self.output_linear = nn.Linear(d_model, d_model)

        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            mask = mask.unsqueeze(1)
        batch_size = query.size(0)

        # 1) Do all the linear projections in batch from d_model => h x d_k
        query, key, value = [l(x).view(batch_size, -1, self.nhead, self.d_k).transpose(1, 2)
                             for l, x in zip(self.linear_layers, (query, key, value))]

        # 2) Apply attention on all the projected vectors in batch.
        x, attn = attention(query, key, value, mask=mask, dropout=self.dropout)

        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.nhead * self.d_k)

        context = self.output_linear(x)
        return context  # , attn
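MultiHeadedAttention above delegates to an attention function imported from .single, which is not included in this record. A common scaled dot-product implementation compatible with the call signature used here is sketched below; it is an assumption about the missing module, not its verified contents.

# Hypothetical stand-in for the .single.attention dependency: scaled
# dot-product attention matching the (query, key, value, mask, dropout)
# call made in MultiHeadedAttention.forward.
import math
import torch
import torch.nn.functional as F


def attention(query, key, value, mask=None, dropout=None):
    d_k = query.size(-1)
    # (batch, nhead, L, d_k) x (batch, nhead, d_k, L) -> (batch, nhead, L, L)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    p_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    return torch.matmul(p_attn, value), p_attn

With d_model=512 and nhead=8, a (batch, seq_len, 512) query/key/value would then yield a (batch, seq_len, 512) context tensor from the module above.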
35.435897
92
0.595514
202
1,382
3.925743
0.391089
0.06053
0.065574
0.035309
0.123581
0.123581
0.07314
0.07314
0.07314
0
0
0.017294
0.288712
1,382
39
93
35.435897
0.78942
0.184515
0
0
0
0
0
0
0
0
0
0
0.047619
1
0.095238
false
0
0.095238
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a50b1cbdb77a8f4dc63235c790b37c0f8c9b3d2
5,658
py
Python
avatar/generalization.py
Julian-Theis/AVATAR
24fcd6eaa26f413be528a160d865d5d7e49a780b
[ "MIT" ]
7
2020-12-22T12:09:14.000Z
2022-03-29T12:50:35.000Z
avatar/generalization.py
ProminentLab/AVATAR
a20c767d8739a52f538927b4ec3d528952263d5a
[ "MIT" ]
10
2020-11-13T17:45:59.000Z
2022-02-10T00:50:38.000Z
avatar/generalization.py
ProminentLab/AVATAR
a20c767d8739a52f538927b4ec3d528952263d5a
[ "MIT" ]
2
2020-03-26T22:27:27.000Z
2020-07-07T22:36:41.000Z
import os, time, argparse from datetime import datetime from pm4py.objects.log.importer.csv import factory as csv_importer from pm4py.objects.log.exporter.xes import factory as xes_exporter from pm4py.objects.log.importer.xes import factory as xes_importer from pm4py.objects.petri.importer import pnml as pnml_importer from pm4py.evaluation.replay_fitness import factory as replay_factory from pm4py.evaluation.precision import factory as precision_factory from conf.settings import DATA_PATH WORK_PATH = os.path.abspath(os.getcwd()) def readFile(f_name1, f_name2, unique=False): traces = [] skipped = 0 with open(f_name1) as file: file_contents = file.read() file_contents = file_contents.split("\n") print("Number of train traces are:", str(len(file_contents))) for row in file_contents: if unique: if row not in traces: traces.append(row) else: skipped += 1 else: traces.append(row) with open(f_name2) as file: file_contents = file.read() file_contents = file_contents.split("\n") print("Number of generated traces are:", str(len(file_contents))) for row in file_contents: if unique: if row not in traces: traces.append(row) else: skipped += 1 else: traces.append(row) f_traces = [] for trace in traces: f_trace = [] t = trace.split(" ") for i in t: if i != "" and "<" not in i: f_trace.append(i) if len(f_trace) > 0: f_traces.append(f_trace) print("Number of traces are:", str(len(f_traces))) print("Number of skipped traces are:", str(skipped)) return f_traces def writeToFile(file, lst): with open(file, 'w') as outfile: for entry in lst: outfile.write(str(entry) + "\n") def convertToCsv(traces, to_path): lines = [] case = 0 timestamp = 0 line = "concept:name,case:concept:name,time:timestamp" lines.append(line) for trace in traces: for event in trace: timestamp = timestamp + 1 dt_object = datetime.fromtimestamp(timestamp) line = str(event) + "_" + "," + str(case) + "," + str(dt_object) lines.append(line) case = case + 1 writeToFile(str(to_path), lines) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-s', '--system', help='Which system (e.g. pb_system_5_3)', required=True) parser.add_argument('-sfx', '--suffix', help='Suffix (chosen epoch, e.g. 
1981)', required=True) parser.add_argument('-j', '--job', help='Job (0/1)', required=True) parser.add_argument('-pn', '--pn', help='Petri net file to evaluate', required=True) parser.add_argument('-strategy', '--strategy', help='naive/mh', required=True) args = parser.parse_args() system = args.system suffix = int(args.suffix) job = args.job pn = args.pn strategy = args.strategy if DATA_PATH is None: train_file = os.path.join(WORK_PATH, "data", "variants", system + "_train.txt") gen_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt") csv_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv") xes_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes") pn_file = os.path.join(WORK_PATH, "data", "pns", system, pn) else: train_file = os.path.join(DATA_PATH, "variants", system + "_train.txt") gen_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt") csv_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv") xes_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes") pn_file = os.path.join(DATA_PATH, "pns", system, pn) """ READ FILES AND CONVERT TO XES """ traces = readFile(train_file,gen_file, unique=True) convertToCsv(traces=traces, to_path=csv_file) time.sleep(1) log = csv_importer.import_event_log(csv_file) xes_exporter.export_log(log, xes_file) time.sleep(1) """ PERFORM MEASUREMENT ON PN AND XES""" log = xes_importer.import_log(xes_file) net, initial_marking, final_marking = pnml_importer.import_net(pn_file) fitness = replay_factory.apply(log, net, initial_marking, final_marking) print("Fitness=", fitness) precision = precision_factory.apply(log, net, initial_marking, final_marking) print("Precision=", precision) fitness = fitness["log_fitness"] generalization = 2 * ((fitness * precision) / (fitness + precision)) if strategy == "mh": print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using MH SAMPLING on suffix ", str(suffix)," ***") elif strategy == "naive": print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using NAIVE SAMPLING on suffix ", str(suffix), " ***") else: raise ValueError("Unknown strategy.") print("AVATAR Generalization=", generalization)
39.84507
166
0.61824
728
5,658
4.623626
0.197802
0.019608
0.029709
0.041592
0.455437
0.366013
0.3571
0.345811
0.345811
0.301842
0
0.006744
0.240014
5,658
142
167
39.84507
0.776047
0
0
0.247788
0
0
0.145109
0.008062
0
0
0
0
0
1
0.026549
false
0
0.106195
0
0.141593
0.079646
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a51f623e8f7d8887b5aa54af4a94e17cde8759e
6,960
py
Python
huaweicloud-sdk-image/huaweicloudsdkimage/v1/image_client.py
handsome-baby/huaweicloud-sdk-python-v3
6cdcf1da8b098427e58fc3335a387c14df7776d0
[ "Apache-2.0" ]
1
2021-04-16T07:59:28.000Z
2021-04-16T07:59:28.000Z
huaweicloud-sdk-image/huaweicloudsdkimage/v1/image_client.py
Lencof/huaweicloud-sdk-python-v3
d13dc4e2830a83e295be6e4de021999b3376e34e
[ "Apache-2.0" ]
null
null
null
huaweicloud-sdk-image/huaweicloudsdkimage/v1/image_client.py
Lencof/huaweicloud-sdk-python-v3
d13dc4e2830a83e295be6e4de021999b3376e34e
[ "Apache-2.0" ]
1
2022-01-17T02:24:18.000Z
2022-01-17T02:24:18.000Z
# coding: utf-8 from __future__ import absolute_import import datetime import re import importlib import six from huaweicloudsdkcore.client import Client, ClientBuilder from huaweicloudsdkcore.exceptions import exceptions from huaweicloudsdkcore.utils import http_utils from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest class ImageClient(Client): """ :param configuration: .Configuration object for this client :param pool_threads: The number of threads to use for async requests to the API. More threads means more concurrent API requests. """ PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types NATIVE_TYPES_MAPPING = { 'int': int, 'long': int if six.PY3 else long, 'float': float, 'str': str, 'bool': bool, 'date': datetime.date, 'datetime': datetime.datetime, 'object': object, } def __init__(self): super(ImageClient, self).__init__() self.model_package = importlib.import_module("huaweicloudsdkimage.v1.model") self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'} @classmethod def new_builder(cls, clazz=None): if clazz is None: return ClientBuilder(cls) if clazz.__name__ != "ImageClient": raise TypeError("client type error, support client type is ImageClient") return ClientBuilder(clazz) def run_celebrity_recognition(self, request): """名人识别 分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。 :param RunCelebrityRecognitionRequest request :return: RunCelebrityRecognitionResponse """ return self.run_celebrity_recognition_with_http_info(request) def run_celebrity_recognition_with_http_info(self, request): """名人识别 分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。 :param RunCelebrityRecognitionRequest request :return: RunCelebrityRecognitionResponse """ all_params = ['body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1.0/image/celebrity-recognition', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='RunCelebrityRecognitionResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def run_image_tagging(self, request): """图像标签 自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。 :param RunImageTaggingRequest request :return: RunImageTaggingResponse """ return self.run_image_tagging_with_http_info(request) def run_image_tagging_with_http_info(self, request): """图像标签 自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。 :param RunImageTaggingRequest request :return: RunImageTaggingResponse """ all_params = ['body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = 
local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1.0/image/tagging', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='RunImageTaggingResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, response_type=None, response_headers=None, auth_settings=None, collection_formats=None, request_type=None): """Makes the HTTP request and returns deserialized data. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response_type: Response data type. :param response_headers: Header should be added to response data. :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param request_type: Request data type. :return: Return the response directly. """ return self.do_http_request( method=method, resource_path=resource_path, path_params=path_params, query_params=query_params, header_params=header_params, body=body, post_params=post_params, response_type=response_type, response_headers=response_headers, collection_formats=collection_formats, request_type=request_type)
32.985782
135
0.653305
718
6,960
6.050139
0.229805
0.033149
0.025783
0.024171
0.536832
0.526243
0.493094
0.481123
0.481123
0.481123
0
0.001769
0.268822
6,960
210
136
33.142857
0.851837
0.243966
0
0.555556
0
0
0.077468
0.039846
0
0
0
0
0
1
0.059829
false
0
0.08547
0
0.230769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a52fbe0941050c6bef7a51be53e3c24aa5d63bd
18,295
py
Python
frank_wolfe.py
ebezzam/PolyatomicFW_SPL
7fbbead5a642915c4bb4d061006b7dac8f6af788
[ "MIT" ]
null
null
null
frank_wolfe.py
ebezzam/PolyatomicFW_SPL
7fbbead5a642915c4bb4d061006b7dac8f6af788
[ "MIT" ]
null
null
null
frank_wolfe.py
ebezzam/PolyatomicFW_SPL
7fbbead5a642915c4bb4d061006b7dac8f6af788
[ "MIT" ]
1
2022-02-23T07:18:03.000Z
2022-02-23T07:18:03.000Z
import numpy as np from typing import Optional, Any from pandas import DataFrame from copy import deepcopy from abc import abstractmethod from utils import TimedGenericIterativeAlgorithm import pycsou.core as pcore import pycsou.linop as pl from pycsou.func.penalty import L1Norm from pycsou.func.loss import SquaredL2Loss from pycsou.opt.proxalgs import APGD class GenericFWSolverForLasso(TimedGenericIterativeAlgorithm): def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None, lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500, stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10, remove_positions: bool = False, remember_iterand: bool = False, decreasing: bool = False, multi_spikes_threshold: float = .7, multi_spikes: bool = True, reweighting: str = 'ista', t_max: float = None): self.data = data self.forwardOp = forwardOp self.stopping_strategy = stopping_strategy self.accuracy_threshold = accuracy_threshold self.multi_spikes = multi_spikes self.multi_spikes_threshold = multi_spikes_threshold self.reweighting = reweighting self.remove_positions = remove_positions self.decreasing = decreasing self.dim = self.forwardOp.shape[1] self.x0 = np.zeros(self.dim) self.dual_certificate_value = 1 / lambda_factor self.new_ind = None self.epsilon = None self.remember_iterand = remember_iterand self.iterand_history = [] init_iterand = {'iterand': self.x0, 'positions': np.array([], dtype=int)} l22_loss = (1 / 2) * SquaredL2Loss(dim=self.forwardOp.shape[0], data=self.data) self.data_fidelity = l22_loss * self.forwardOp if lambda_ is None: lambda_ = lambda_factor * np.abs(self.forwardOp.adjoint(self.data)).max() self.lambda_ = lambda_ self.penalty = self.lambda_ * L1Norm(dim=self.dim) objective_functional = self.data_fidelity + self.penalty self.bound = np.linalg.norm(self.data) ** 2 / (2 * self.lambda_) self.start = None if verbose is not None: self.candidate_new = [] self.actual_new = [] super(GenericFWSolverForLasso, self).__init__(objective_functional=objective_functional, init_iterand=init_iterand, max_iter=max_iter, min_iter=min_iter, accuracy_threshold=accuracy_threshold, verbose=verbose, t_max=t_max) def update_iterand(self) -> Any: self.compute_new_impulse() res = self.combine_new_impulse() return res def compute_new_impulse(self): dual_certificate = - self.data_fidelity.gradient(self.old_iterand['iterand']) / self.lambda_ d = np.abs(dual_certificate) if self.multi_spikes: maxi = np.max(d) if self.iter == 0: threshold = self.multi_spikes_threshold * maxi self.epsilon = (1 - self.multi_spikes_threshold) * maxi else: threshold = maxi - (1 / (self.iter + 2)) * self.epsilon indices = np.where(d > max(threshold, 1.))[0] # print("Threshold: {} / {}".format(threshold, maxi)) # print('Candidate indices: {}\n'.format(indices.shape)) self.new_ind = np.setdiff1d(indices, self.old_iterand['positions'], assume_unique=True) if self.verbose is not None: self.candidate_new.append(indices.shape[0]) self.actual_new.append(self.new_ind.size) if len(self.new_ind) == 0: self.new_ind = None self.dual_certificate_value = max(dual_certificate.min(), dual_certificate.max(), key=abs) else: self.new_ind = np.argmax(d) self.dual_certificate_value = dual_certificate[self.new_ind] if self.new_ind in self.old_iterand['positions']: self.new_ind = None # already present position if abs(self.dual_certificate_value) < 1.: if self.verbose is not None: print('Warning, dual certificate lower than 1 at iteration 
{}'.format(self.iter)) @abstractmethod def combine_new_impulse(self) -> Any: pass def update_diagnostics(self): """ Dual ceritificate value is computed after iteration Returns ------- """ if self.iter == 0: self.diagnostics = DataFrame( columns=['Iter', 'Relative Improvement Objective', 'Relative Improvement Iterand', 'Dual Certificate Value', 'Objective Function']) self.diagnostics.loc[self.iter, 'Iter'] = self.iter if np.linalg.norm(self.old_iterand['iterand']) == 0: self.diagnostics.loc[self.iter, 'Relative Improvement Iterand'] = np.infty else: self.diagnostics.loc[self.iter, 'Relative Improvement Iterand'] = np.linalg.norm( self.old_iterand['iterand'] - self.iterand['iterand']) / np.linalg.norm( self.old_iterand['iterand']) self.diagnostics.loc[self.iter, 'Dual Certificate Value'] = self.dual_certificate_value # before iteration self.diagnostics.loc[self.iter, 'Objective Function'] = self.objective_functional(self.iterand['iterand']) if self.iter == 0: self.diagnostics.loc[self.iter, 'Relative Improvement Objective'] = np.infty else: self.diagnostics.loc[self.iter, 'Relative Improvement Objective'] = (self.diagnostics.loc[ self.iter - 1, 'Objective Function'] - self.diagnostics.loc[ self.iter, 'Objective Function']) / \ self.diagnostics.loc[ self.iter - 1, 'Objective Function'] if self.remember_iterand: self.iterand_history.append(self.iterand['iterand']) def print_diagnostics(self): print(dict(self.diagnostics.loc[self.iter])) def stopping_metric(self): if self.iter == 0: return np.infty elif self.stopping_strategy == 'relative_improvement': return abs(self.diagnostics.loc[self.iter - 1, 'Relative Improvement Objective']) elif self.stopping_strategy == 'certificate': value = self.diagnostics.loc[self.iter - 1, 'Dual Certificate Value'] return abs(abs(value) - 1) else: raise ValueError('Stopping strategy must be in ["relative_improvement", "certificate"]') def restricted_support_lasso(self, active_indices: np.ndarray, accuracy: float, x0: np.ndarray = None, d: float = 75.): if x0 is None: x0 = np.zeros(active_indices.shape) injection = pl.sampling.SubSampling(self.dim, active_indices, dtype=float).get_adjointOp() restricted_forward = pl.DenseLinearOperator( self.forwardOp.mat[:, active_indices]) restricted_forward.compute_lipschitz_cst(tol=1e-3) restricted_data_fidelity = (1 / 2) * SquaredL2Loss(dim=restricted_forward.shape[0], data=self.data) \ * restricted_forward # restricted_data_fidelity.lipschitz_cst = self.data_fidelity.lipschitz_cst # restricted_data_fidelity.diff_lipschitz_cst = self.data_fidelity.diff_lipschitz_cst restricted_regularization = self.lambda_ * L1Norm(dim=restricted_data_fidelity.shape[1]) if self.reweighting == 'fista': acceleration = 'CD' tau = None elif self.reweighting == 'ista': tau = 1.9 / restricted_data_fidelity.diff_lipschitz_cst acceleration = None else: raise ValueError('Reweighting strategy must be in ["fista", "ista"]') solver = APGD(dim=restricted_data_fidelity.shape[1], F=restricted_data_fidelity, G=restricted_regularization, x0=x0, tau=tau, acceleration=acceleration, verbose=None, accuracy_threshold=accuracy, d=d, max_iter=2000, min_iter=1) return injection(solver.iterate()[0]['iterand']) class VanillaFWSolverForLasso(GenericFWSolverForLasso): def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None, lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500, stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10, remember_iterand: 
bool = False, step_size: str = 'optimal', t_max: float = None): if step_size in ['optimal', 'regular']: self.step_size = step_size else: raise ValueError("Step size strategy must be in ['optimal', 'regular']") super(VanillaFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_, lambda_factor=lambda_factor, min_iter=min_iter, max_iter=max_iter, stopping_strategy=stopping_strategy, accuracy_threshold=accuracy_threshold, verbose=verbose, remember_iterand=remember_iterand, multi_spikes=False, t_max=t_max) def combine_new_impulse(self) -> Any: iterand = deepcopy(self.old_iterand['iterand']) if self.new_ind is not None: new_positions = np.hstack([self.old_iterand['positions'], self.new_ind]) if self.step_size == 'optimal': gamma = np.dot(self.data_fidelity.gradient(iterand), iterand) + self.lambda_ * ( 1. * np.linalg.norm(iterand, 1) + (np.abs(self.dual_certificate_value) - 1.) * self.bound) gamma /= np.linalg.norm(self.forwardOp.mat[:, self.new_ind] * self.bound * np.sign( self.dual_certificate_value) - self.forwardOp @ iterand, 2) ** 2 else: gamma = 2/(self.iter + 3) else: new_positions = self.old_iterand['positions'] if self.step_size == 'optimal': gamma = np.dot(self.data_fidelity.gradient(iterand), iterand) + self.lambda_ * np.linalg.norm(iterand, 1) gamma /= np.linalg.norm(self.forwardOp @ iterand, 2) ** 2 else: gamma = 2/(self.iter + 3) if not 0 < gamma < 1: gamma = np.clip(gamma, 0., 1.) iterand *= (1 - gamma) if self.new_ind is not None: iterand[self.new_ind] += gamma * np.sign(self.dual_certificate_value) * self.bound return {'iterand': iterand, 'positions': new_positions} class FullyCorrectiveFWSolverForLasso(VanillaFWSolverForLasso): def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None, lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500, stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10, remember_iterand: bool = False, remove_positions: bool = False, reweighting_prec: float = 1e-4, reweighting: str = 'fista', t_max: float = None): self.remove_positions = remove_positions self.reweighting_prec = reweighting_prec super(FullyCorrectiveFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_, lambda_factor=lambda_factor, min_iter=min_iter, max_iter=max_iter, stopping_strategy=stopping_strategy, accuracy_threshold=accuracy_threshold, verbose=verbose, remember_iterand=remember_iterand, t_max=t_max) self.reweighting = reweighting self.last_weight = self.bound def combine_new_impulse(self) -> Any: iterand = deepcopy(self.old_iterand['iterand']) if self.new_ind is not None: new_positions = np.unique(np.hstack([self.old_iterand['positions'], self.new_ind])) if self.iter > 0 and self.remove_positions: active_indices = np.unique(np.hstack([iterand.nonzero()[0], self.new_ind])) else: active_indices = new_positions else: new_positions = self.old_iterand['positions'] if self.iter > 0 and self.remove_positions: active_indices = np.unique(iterand.nonzero()[0]) else: active_indices = new_positions if active_indices.shape[0] > 1: iterand[self.new_ind] = np.sign(self.dual_certificate_value) * self.last_weight x0 = iterand[active_indices] iterand = self.restricted_support_lasso(active_indices, self.reweighting_prec, x0=x0) if self.new_ind is not None: self.last_weight = iterand[self.new_ind] else: tmp = np.zeros(self.dim) tmp[active_indices] = 1. 
column = self.forwardOp(tmp) iterand[active_indices] = np.dot(self.data, column) / (np.linalg.norm(column, 2) ** 2) self.last_weight = iterand[active_indices] overvalue = np.abs(iterand) > self.bound if overvalue.sum() > 0: print("Overvalue at coordinates {}".format(np.arange(overvalue.shape[0])[overvalue])) iterand[overvalue] = np.sign(iterand[overvalue]) * self.bound return {'iterand': iterand, 'positions': new_positions} class PolyatomicFWSolverForLasso(GenericFWSolverForLasso): def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None, lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500, stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10, remove_positions: bool = False, remember_iterand: bool = False, final_reweighting_prec: float = 1e-4, init_reweighting_prec: float = .2, decreasing: bool = False, multi_spikes_threshold: float = .7, t_max: float = None): self.remove_positions = remove_positions self.reweighting_prec = init_reweighting_prec self.init_reweighting_prec = init_reweighting_prec self.decreasing = decreasing self.final_reweighting_prec = final_reweighting_prec super(PolyatomicFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_, lambda_factor=lambda_factor, min_iter=min_iter, max_iter=max_iter, stopping_strategy=stopping_strategy, accuracy_threshold=accuracy_threshold, verbose=verbose, remember_iterand=remember_iterand, multi_spikes=True, multi_spikes_threshold=multi_spikes_threshold, reweighting='ista', t_max=t_max) def combine_new_impulse(self): iterand = deepcopy(self.old_iterand['iterand']) if self.new_ind is not None: new_positions = np.unique(np.hstack([self.old_iterand['positions'], self.new_ind])) if self.iter > 0 and self.remove_positions: active_indices = np.unique(np.hstack([iterand.nonzero()[0], self.new_ind])) else: active_indices = new_positions else: new_positions = self.old_iterand['positions'] if self.iter > 0 and self.remove_positions: active_indices = np.unique(iterand.nonzero()[0]) else: active_indices = new_positions if active_indices.shape[0] > 1: x0 = iterand[active_indices] iterand = self.restricted_support_lasso(active_indices, self.reweighting_prec, x0=x0) else: tmp = np.zeros(self.dim) tmp[active_indices] = 1. column = self.forwardOp(tmp) iterand[active_indices] = np.dot(self.data, column) / (np.linalg.norm(column, 2) ** 2) overvalue = np.abs(iterand) > self.bound if overvalue.sum() > 0: #Sanity check, never been triggered in practice print("Overvalue at coordinates {}".format(np.arange(overvalue.shape[0])[overvalue])) iterand[overvalue] = np.sign(iterand[overvalue]) * self.bound if self.decreasing: self.reweighting_prec = self.init_reweighting_prec / (self.iter + 1) self.reweighting_prec = max(self.reweighting_prec, self.final_reweighting_prec) return {'iterand': iterand, 'positions': new_positions}
54.287834
135
0.574857
1,913
18,295
5.292211
0.111343
0.021335
0.022718
0.02825
0.586033
0.538424
0.469676
0.439154
0.406262
0.374654
0
0.012902
0.330637
18,295
336
136
54.449405
0.813817
0.023066
0
0.417544
0
0
0.055649
0.001346
0
0
0
0
0
1
0.049123
false
0.003509
0.038596
0
0.129825
0.017544
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a56848b910320fe7cdd13bea4f9b4579072e4c7
724
py
Python
fightchurn/listings/chap9/listing_9_4_regression_cparam.py
guy4261/fight-churn
f3820edd6d4af5e0bd625434d3ad4236aa781ef4
[ "MIT" ]
151
2019-04-26T19:05:14.000Z
2022-03-28T10:11:53.000Z
fightchurn/listings/chap9/listing_9_4_regression_cparam.py
guy4261/fight-churn
f3820edd6d4af5e0bd625434d3ad4236aa781ef4
[ "MIT" ]
15
2019-08-05T06:35:00.000Z
2022-03-31T02:58:30.000Z
fightchurn/listings/chap9/listing_9_4_regression_cparam.py
guy4261/fight-churn
f3820edd6d4af5e0bd625434d3ad4236aa781ef4
[ "MIT" ]
71
2019-06-07T17:50:04.000Z
2022-03-27T02:49:24.000Z
from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions


def regression_cparam(data_set_path, C_param):
    X,y = prepare_data(data_set_path)
    retain_reg = LogisticRegression(C=C_param, penalty='l1', solver='liblinear', fit_intercept=True)
    retain_reg.fit(X, y)
    c_ext = '_c{:.3f}'.format(C_param)
    save_regression_summary(data_set_path, retain_reg, ext=c_ext)
    save_regression_model(data_set_path, retain_reg, ext=c_ext)
    save_dataset_predictions(data_set_path, retain_reg, X, ext=c_ext)
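A minimal driver sketch for the listing above; the data-set path and the grid of C values are hypothetical, not part of the original listing:

if __name__ == '__main__':
    # Sweep a few inverse-regularization strengths and save one summary per value.
    for c in (0.01, 0.03, 0.1, 0.3, 1.0):
        regression_cparam('output/socialnet7_dataset.csv', c)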
51.714286
119
0.825967
111
724
4.954955
0.36036
0.063636
0.1
0.123636
0.403636
0.330909
0.330909
0.330909
0.330909
0.218182
0
0.01214
0.089779
724
13
120
55.692308
0.822458
0
0
0
0
0
0.026279
0
0
0
0
0
0
1
0.090909
false
0
0.272727
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a56fc807619248f05b24361a88a0d2de688ca4d
2,156
py
Python
SM_28BYJ48/logger/logger.py
kaulketh/stepper-motor-stuff
ca7cc78279b378e5ad8e19f9c77b794a43d9a07e
[ "Unlicense" ]
null
null
null
SM_28BYJ48/logger/logger.py
kaulketh/stepper-motor-stuff
ca7cc78279b378e5ad8e19f9c77b794a43d9a07e
[ "Unlicense" ]
null
null
null
SM_28BYJ48/logger/logger.py
kaulketh/stepper-motor-stuff
ca7cc78279b378e5ad8e19f9c77b794a43d9a07e
[ "Unlicense" ]
null
null
null
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# created 02.02.2021, tkaulke
# Thomas Kaulke, [email protected]
# https://github.com/kaulketh
# -----------------------------------------------------------
__author__ = "Thomas Kaulke"
__email__ = "[email protected]"

import errno
import logging
import os
from logging.config import fileConfig

# runtime location
this_folder = os.path.dirname(os.path.abspath(__file__))
# define log folder related to location
log_folder = os.path.join(this_folder, '../logs')
# define ini and log files
ini_file = 'debug.ini'
info_log_file = log_folder + '/info.log'
error_log_file = log_folder + '/error.log'

# check if exists or create log folder
try:
    os.makedirs(log_folder, exist_ok=True)  # Python>3.2
except TypeError:
    try:
        os.makedirs(log_folder)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(log_folder):
            pass
        else:
            raise

# setup configuration
config_file = os.path.join(this_folder, ini_file)
fileConfig(config_file, disable_existing_loggers=True)

# create handlers
handler_info = logging.FileHandler(os.path.join(this_folder, info_log_file))
handler_error = logging.FileHandler(os.path.join(this_folder, error_log_file))

# set levels
handler_info.setLevel(logging.INFO)
handler_error.setLevel(logging.ERROR)

# create formatters and add to handlers
format_info = \
    logging.Formatter('%(asctime)s %(levelname)s '
                      '[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
                      '%(message).180s', datefmt='%Y-%m-%d %H:%M:%S')
format_error = \
    logging.Formatter(
        '%(asctime)s %(levelname)s '
        '[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
        '[ thread: %(threadName)s ] %(message)s')
handler_info.setFormatter(format_info)
handler_error.setFormatter(format_error)


def get_logger(name: str = __name__):
    logger = logging.getLogger(name)
    # add handler
    logger.addHandler(handler_info)
    logger.addHandler(handler_error)
    return logger


if __name__ == '__main__':
    pass
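A minimal usage sketch, assuming the module is importable as `logger` and that `debug.ini` sits next to it:

from logger import get_logger

log = get_logger(__name__)
log.info("stepper initialised")        # goes to logs/info.log
log.error("limit switch not reached")  # goes to logs/error.log with thread info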
28.746667
78
0.652597
274
2,156
4.905109
0.408759
0.053571
0.029762
0.041667
0.212798
0.150298
0.150298
0.09375
0.09375
0.09375
0
0.009518
0.171614
2,156
74
79
29.135135
0.743001
0.224954
0
0.133333
0
0
0.176649
0.027828
0
0
0
0
0
1
0.022222
false
0.044444
0.088889
0
0.133333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a58e531ca2dae9287cb878ce3e08653ca7ffa30
1,451
py
Python
gsheetsdb/url.py
tim-werner/gsheets-db-api
12f2a4fbe1bd5aa36781226759326ce782b08a91
[ "MIT" ]
3
2021-02-23T06:40:35.000Z
2022-03-14T23:13:10.000Z
gsheetsdb/url.py
tim-werner/gsheets-db-api
12f2a4fbe1bd5aa36781226759326ce782b08a91
[ "MIT" ]
null
null
null
gsheetsdb/url.py
tim-werner/gsheets-db-api
12f2a4fbe1bd5aa36781226759326ce782b08a91
[ "MIT" ]
null
null
null
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from collections import OrderedDict

from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse

FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)


def get_url(url, headers=0, gid=0, sheet=None):
    parts = parse.urlparse(url)
    if parts.path.endswith('/edit'):
        path = parts.path[:-len('/edit')]
    else:
        path = parts.path
    path = '/'.join((path.rstrip('/'), 'gviz/tq'))

    qs = parse.parse_qs(parts.query)
    if 'headers' in qs:
        headers = int(qs['headers'][-1])
    if 'gid' in qs:
        gid = qs['gid'][-1]
    if 'sheet' in qs:
        sheet = qs['sheet'][-1]

    if parts.fragment.startswith('gid='):
        gid = parts.fragment[len('gid='):]

    args = OrderedDict()
    if headers > 0:
        args['headers'] = headers
    if sheet is not None:
        args['sheet'] = sheet
    else:
        args['gid'] = gid
    params = parse.urlencode(args)

    return parse.urlunparse(
        (parts.scheme, parts.netloc, path, None, params, None))


def extract_url(sql):
    try:
        return parse_sql(sql)['from']
    except pyparsing.ParseException:
        # fallback to regex to extract from
        match = FROM_REGEX.search(sql)
        if match:
            return match.group(1).strip('"')
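A short usage sketch of the two helpers above; the spreadsheet URL is a made-up placeholder:

sql = 'SELECT * FROM "https://docs.google.com/spreadsheets/d/abc123/edit#gid=0"'
sheet_url = extract_url(sql)    # the URL quoted in the FROM clause
gviz_url = get_url(sheet_url)   # rewritten to the .../gviz/tq endpoint with gid/headers/sheet args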
25.45614
63
0.626465
191
1,451
4.612565
0.371728
0.045403
0.072645
0
0
0
0
0
0
0
0
0.006346
0.239835
1,451
56
64
25.910714
0.792384
0.022743
0
0.046512
0
0
0.066384
0
0
0
0
0
0
1
0.046512
false
0
0.209302
0
0.325581
0.023256
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a5afdc282108af1d03f7c2caaa0527030efeee6
5,178
py
Python
detr/datasets/construction_panoptic.py
joyjeni/detr-fine
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
[ "Apache-2.0" ]
null
null
null
detr/datasets/construction_panoptic.py
joyjeni/detr-fine
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
[ "Apache-2.0" ]
null
null
null
detr/datasets/construction_panoptic.py
joyjeni/detr-fine
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
[ "Apache-2.0" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import json from pathlib import Path import numpy as np import torch from PIL import Image from panopticapi.utils import rgb2id # from util.box_ops import masks_to_boxes from .construction import make_construction_transforms import logging def box_xywh_to_xyxy(x): xs, ys, w, h = x.unbind(-1) b = [xs, ys, (xs + w), (ys + h)] return torch.stack(b, dim=-1) def masks_to_boxes(segments): boxes = [] labels = [] iscrowd = [] area = [] for ann in segments: if len(ann["bbox"]) == 4: boxes.append(ann["bbox"]) area.append(ann['area']) else: boxes.append([0, 0, 2, 2]) area.append(4) labels.append(ann["category_id"]) iscrowd.append(ann['iscrowd']) if len(boxes) == 0 and len(labels) == 0: boxes.append([0, 0, 2, 2]) labels.append(1) area.append(4) iscrowd.append(0) boxes = torch.tensor(boxes, dtype=torch.int64) labels = torch.tensor(labels, dtype=torch.int64) iscrowd = torch.tensor(iscrowd) area = torch.tensor(area) boxes = box_xywh_to_xyxy(boxes) return boxes, labels, iscrowd, area class ConstructionPanoptic: def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True): with open(ann_file, "r") as f: self.coco = json.load(f) # sort 'images' field so that they are aligned with 'annotations' # i.e., in alphabetical order self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"]) # sanity check if "annotations" in self.coco: for img, ann in zip(self.coco["images"], self.coco["annotations"]): assert img["file_name"][:-4] == ann["file_name"][:-4] self.img_folder = img_folder self.ann_folder = ann_folder self.ann_file = ann_file self.transforms = transforms self.return_masks = return_masks def __getitem__(self, idx): try: ann_info = ( self.coco["annotations"][idx] if "annotations" in self.coco else self.coco["images"][idx] ) img_path = Path(self.img_folder) / ann_info["file_name"].replace(".png", ".jpg") ann_path = Path(self.ann_folder) / ann_info["file_name"] img = Image.open(img_path).convert("RGB") w, h = img.size if "segments_info" in ann_info: masks = np.asarray(Image.open(ann_path), dtype=np.uint32) masks = rgb2id(masks) ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) masks = masks == ids[:, None, None] masks = torch.as_tensor(masks, dtype=torch.uint8) # labels = torch.tensor( # [ann["category_id"] for ann in ann_info["segments_info"]], # dtype=torch.int64, # ) target = {} target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]]) if self.return_masks: target['masks'] = masks boxes, labels, iscrowd, area = masks_to_boxes(ann_info["segments_info"]) target['labels'] = labels # Instead of finding boxes, just take the one from json info available # target["boxes"] = masks_to_boxes(ann_info["segments_info"]) target["boxes"] = boxes target['size'] = torch.as_tensor([int(h), int(w)]) target['orig_size'] = torch.as_tensor([int(h), int(w)]) target['iscrowd'] = iscrowd target['area'] = area # if "segments_info" in ann_info: # for name in ['iscrowd', 'area']: # target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) if self.transforms is not None: img, target = self.transforms(img, target) return img, target except Exception as e: logging.error(ann_info) raise e def __len__(self): return len(self.coco['images']) def get_height_and_width(self, idx): img_info = self.coco['images'][idx] height = img_info['height'] width = img_info['width'] return height, width def build(image_set, args): root = Path(args.data_path) assert ( 
root.exists() ), f"provided Panoptic path {root} does not exist" mode = "panoptic" PATHS = { "train": ("images", f"{mode}", f"{mode}.json"), "val": ("images", f"val_{mode}", f"val_{mode}.json"), } img_folder, ann_folder, ann_file = PATHS[image_set] img_folder_path = root / img_folder ann_folder_path = root / ann_folder ann_file = root / ann_file dataset = ConstructionPanoptic( img_folder_path, ann_folder_path, ann_file, transforms=make_construction_transforms(image_set), return_masks=args.masks, ) return dataset
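A self-contained check of the box helpers defined at the top of this file; the segment dicts are invented for illustration:

segments = [
    {"bbox": [10, 20, 30, 40], "area": 1200, "category_id": 3, "iscrowd": 0},
    {"bbox": [], "area": 0, "category_id": 1, "iscrowd": 0},  # malformed bbox falls back to [0, 0, 2, 2]
]
boxes, labels, iscrowd, area = masks_to_boxes(segments)
print(boxes.tolist())   # [[10, 20, 40, 60], [0, 0, 2, 2]] after the xywh -> xyxy conversion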
30.821429
115
0.571649
653
5,178
4.358346
0.235835
0.034434
0.029515
0.03338
0.152846
0.121926
0.077653
0.068166
0.021785
0
0
0.008303
0.30224
5,178
167
116
31.005988
0.779408
0.117613
0
0.035398
0
0
0.086078
0
0
0
0
0
0.017699
1
0.061947
false
0
0.070796
0.00885
0.19469
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a5d7faf0aee2e49257e320032c83e577c7a4db4
2,994
py
Python
max_ai/src/max_ai/mem_db.py
mat-heim/max_ros
e01e4f5b2db96d94865d80452d41b8dcf1412232
[ "Apache-2.0" ]
null
null
null
max_ai/src/max_ai/mem_db.py
mat-heim/max_ros
e01e4f5b2db96d94865d80452d41b8dcf1412232
[ "Apache-2.0" ]
null
null
null
max_ai/src/max_ai/mem_db.py
mat-heim/max_ros
e01e4f5b2db96d94865d80452d41b8dcf1412232
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
'''
memory class stored in sqlite data base
holds raw input and memories in parse taged columns
'''
import sys
import re
import sqlite3
import os
from datetime import date, datetime

from pattern.en import parse
from pattern.en import pprint
from pattern.en import parsetree
from pattern.en import wordnet
from pattern.en import pluralize, singularize
from pattern.en import conjugate, lemma, lexeme

#dir = os.path.dirname(os.path.abspath(__file__))
dir = '/home/erni/catkin_ws/src/max_ros/max_ai/src/max_ai/'
RM = sqlite3.connect(dir + 'robbie_memory.sqlite')
#RM = sqlite3.connect(dir + '/data/robbie_memory.db')
cursor = RM.cursor()


# Information about a single concept
class conceptClass:
    def __init__(self, state='none', locality='none'):
        self.state = state         # what/how is 'concept'
        self.reference = 'none'    # unused
        self.locality = locality   # where is 'concept'
        self.person = '3sg'        # e.g. a thing is 3rd-person, singular
        self.isProperNoun = False  # True if proper noun: e.g. Robert
        self.properties = {}       # Dict of custom properties, e.g. 'age' = 39, 'color' = 'blue'


# Robbie memory class. Collection of concepts
class memoryClass():
    def __init__(self):
        self.concepts = {}
        self.person = {'I': '1sg', 'you': '2sg'}
        self.posessivePronouns = {'1sg': 'my', '2sg': 'your', '3sg': 'its'}

    # Add a concept to memory
    def add(self, c):
        # add oncept to raw_input table in robbie_memory
        # x=
        # dt = datetime.now()
        # RM.execute("insert into RAW_INPUT (RAW, DATE) values (?, ?)",(c, dt))
        # RM.commit()
        self.concepts[c] = conceptClass()
        if c in self.person:
            self.concepts[c].person = self.person[c]
        else:
            self.concepts[c].person = '3sg'

    # Return True if concept 'c' (string) is in memory
    def known(self, c):
        cursor.execute('''SELECT concept, location FROM memory WHERE concept =?''', (c,))
        user = cursor.fetchone()
        # if user == 'None':
        return user

    def add_memory(self, a, b):
        c = '3sg'
        dt = datetime.now()
        RM.execute("insert into memory (concept, location, person,DATE) values (?, ?, ?, ?)",
                   (a, b, c, dt))
        RM.commit()

    def update_memory(self, a, b):
        cursor.execute('''UPDATE memory SET location = ? WHERE concept = ? ''', (b, a))
        RM.commit()

    def search_memory(self, a):
        cursor.execute('''SELECT concept,location, person FROM memory WHERE concept =?''', (a,))
        user = cursor.fetchone()
        return user

    def search_profile(self, a):
        cursor.execute('''SELECT value FROM profile WHERE item =?''', (a,))
        user = cursor.fetchone()
        return user

    def Dump(self):
        return (self.concepts.state)
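A hedged usage sketch; it assumes `robbie_memory.sqlite` and its `memory` table already exist at the hard-coded path:

mem = memoryClass()
mem.add('you')                                   # in-RAM concept, tagged '2sg'
if mem.known('kitchen') is None:                 # not yet in the sqlite memory table
    mem.add_memory('kitchen', 'downstairs')
print(mem.search_memory('kitchen'))              # e.g. ('kitchen', 'downstairs', '3sg')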
31.1875
108
0.58684
375
2,994
4.618667
0.357333
0.038106
0.045035
0.06582
0.129908
0.073903
0.073903
0
0
0
0
0.006524
0.283233
2,994
95
109
31.515789
0.800559
0.237475
0
0.133333
0
0
0.174202
0.022606
0
0
0
0
0
1
0.15
false
0
0.183333
0.016667
0.433333
0.016667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a5e25995315baeb1a8d9bd6a0b259803f947416
1,768
py
Python
examples/pylab_examples/image_masked.py
pierre-haessig/matplotlib
0d945044ca3fbf98cad55912584ef80911f330c6
[ "MIT", "PSF-2.0", "BSD-3-Clause" ]
16
2016-06-14T19:45:35.000Z
2020-11-30T19:02:58.000Z
examples/pylab_examples/image_masked.py
pierre-haessig/matplotlib
0d945044ca3fbf98cad55912584ef80911f330c6
[ "MIT", "PSF-2.0", "BSD-3-Clause" ]
7
2015-05-08T19:36:25.000Z
2015-06-30T15:32:17.000Z
examples/pylab_examples/image_masked.py
pierre-haessig/matplotlib
0d945044ca3fbf98cad55912584ef80911f330c6
[ "MIT", "PSF-2.0", "BSD-3-Clause" ]
6
2015-06-05T03:34:06.000Z
2022-01-25T09:07:10.000Z
#!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.

The second subplot illustrates the use of BoundaryNorm to
get a filled contour effect.
'''

from pylab import *
from numpy import ma
import matplotlib.colors as colors

delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1)  # difference of Gaussians

# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = ma.masked_where(Z > 1.2, Z)

# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
subplot(1, 2, 1)
im = imshow(Zm, interpolation='bilinear',
            cmap=palette,
            norm=colors.Normalize(vmin=-1.0, vmax=1.0, clip=False),
            origin='lower', extent=[-3, 3, -3, 3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)

subplot(1, 2, 2)
im = imshow(Zm, interpolation='nearest',
            cmap=palette,
            norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
                                     ncolors=256, clip=False),
            origin='lower', extent=[-3, 3, -3, 3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
         orientation='horizontal', shrink=0.8)

show()
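Note that `bivariate_normal` is no longer shipped with recent Matplotlib releases; if the example is run against a current version, a stand-in with the same formula can be defined first (a sketch, not part of the original script):

import numpy as np

def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0):
    # Bivariate Gaussian density, matching the old matplotlib.mlab helper's signature.
    Xmu, Ymu = X - mux, Y - muy
    rho = sigmaxy / (sigmax * sigmay)
    z = Xmu**2 / sigmax**2 + Ymu**2 / sigmay**2 - 2 * rho * Xmu * Ymu / (sigmax * sigmay)
    denom = 2 * np.pi * sigmax * sigmay * np.sqrt(1 - rho**2)
    return np.exp(-z / (2 * (1 - rho**2))) / denom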
31.571429
70
0.673643
300
1,768
3.943333
0.453333
0.013525
0.010144
0.02874
0.138631
0.059172
0.059172
0.059172
0.059172
0.059172
0
0.051176
0.182127
1,768
55
71
32.145455
0.766943
0.381787
0
0.129032
0
0
0.106942
0
0
0
0
0
0
1
0
false
0
0.096774
0
0.096774
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a5ef02906722fedfa7e1972d812a70076441239
1,548
py
Python
meme/meme.py
aniket091/modmail-plugins-1
4360ff885f27e5c9488ea5cf9431aff20435209b
[ "MIT" ]
8
2020-01-03T19:01:59.000Z
2021-04-14T13:30:49.000Z
meme/meme.py
aniket091/modmail-plugins-1
4360ff885f27e5c9488ea5cf9431aff20435209b
[ "MIT" ]
4
2020-12-22T12:51:03.000Z
2022-01-05T20:17:00.000Z
meme/meme.py
aniket091/modmail-plugins-1
4360ff885f27e5c9488ea5cf9431aff20435209b
[ "MIT" ]
27
2020-01-17T18:05:29.000Z
2022-02-04T07:38:52.000Z
import discord
from discord.ext import commands
import requests
import random
from box import Box


class WildMemes(commands.Cog):
    """
    Randomly spawns memes.
    """

    subreddits = [
        "dankmemes",
        "wholesomememes",
        "memes",
        "terriblefacebookmemes",
        "historymemes",
        "me_irl",
        "2meirl4meirl",
        "fellowkids",
        "tumblr"
    ]

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message):
        if message.author == self.bot.user:
            return
        elif random.randint(0, 100) < 25:
            async with message.channel.typing():
                chosen_sub = random.choice(self.subreddits)
                r = requests.get(
                    f"https://api.reddit.com/r/{chosen_sub}/top.json?sort=top&t=day&limit=500",
                    headers={'User-agent': 'Super Bot 9000'})
                r = r.json()
                boxed = Box(r)
                data = (random.choice(boxed.data.children)).data
                image = data.url
                upvotes = data.ups
                title = data.title
                subreddit = data.subreddit_name_prefixed
                embed = discord.Embed(title=f'Meme Title: {title}', color=0x6bdcd7)
                embed.set_author(name="A wild meme has appeared!")
                embed.set_image(url=image)
                embed.set_footer(text=f"On {subreddit} with {upvotes} upvotes.")
                await message.channel.send(embed=embed)


def setup(bot):
    bot.add_cog(WildMemes(bot))
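A loading sketch in plain discord.py 1.x style; the extension path and token are placeholders, and the Modmail bot would normally load this through its own plugin manager instead:

from discord.ext import commands

bot = commands.Bot(command_prefix="?")
bot.load_extension("meme.meme")   # runs setup(bot) above and registers the cog
bot.run("YOUR_DISCORD_TOKEN")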
30.352941
108
0.566537
173
1,548
4.988439
0.526012
0.024334
0
0
0
0
0
0
0
0
0
0.016997
0.315891
1,548
51
109
30.352941
0.797923
0.014212
0
0
0
0.02381
0.180013
0.013898
0
0
0.005295
0
0
1
0.047619
false
0
0.119048
0
0.238095
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a62f6ea092332203dc81ebef45e051b04506ddf
12,246
py
Python
Moodle/scripts/edit_conf.py
nii-gakunin-cloud/ocs-templates
a2a39bb8824d489488af3c3972007317bb1ef6a2
[ "BSD-3-Clause" ]
4
2020-05-11T06:30:53.000Z
2022-01-26T03:31:55.000Z
Moodle/scripts/edit_conf.py
nii-gakunin-cloud/ocs-templates
a2a39bb8824d489488af3c3972007317bb1ef6a2
[ "BSD-3-Clause" ]
1
2021-06-17T01:34:27.000Z
2021-06-17T01:34:27.000Z
Moodle/scripts/edit_conf.py
nii-gakunin-cloud/ocs-templates
a2a39bb8824d489488af3c3972007317bb1ef6a2
[ "BSD-3-Clause" ]
3
2020-09-08T00:57:52.000Z
2022-01-18T10:42:22.000Z
from datetime import datetime from difflib import unified_diff from logging import basicConfig, getLogger, INFO import os from pathlib import Path import shutil import subprocess import sys import yaml from urllib.parse import urlparse from notebook import notebookapp from IPython.core.display import HTML WORKDIR = 'edit' META_YML = '.vcp-meta.yml' MOODLE_DIR = '/opt/moodle' CONF_RELATIVE = '/etc' ENV_INHERIT = ['VAULT_ADDR', 'VAULT_TOKEN', 'PATH', 'REQUESTS_CA_BUNDLE'] logger = getLogger(__name__) basicConfig(level=INFO, format='%(message)s') def generate_local_path(host, conf_path, version=None): ret = Path(WORKDIR).absolute() / host if version is None: ret /= datetime.now().strftime("%Y%m%d%H%M%S%f") else: ret /= version ret /= Path(conf_path).name return ret def generate_remote_path(container, conf_path, relative_to=CONF_RELATIVE): return (Path(MOODLE_DIR) / container / 'conf' / Path(conf_path).relative_to(relative_to)) def get_local_path(host, container, conf_path, version=None): if version is None: version = find_latest_version(host, container, conf_path) return generate_local_path(host, conf_path, version) def _match_metainfo(parent, container, conf_path): p = parent / META_YML if not p.exists(): return False with p.open() as f: params = yaml.safe_load(f) return ( isinstance(params, dict) and 'container' in params and 'container_path' in params and params['container'] == container and params['container_path'] == conf_path) def _match_metainfo_by_remote_path(parent, remote_path): p = parent / META_YML if not p.exists(): return False with p.open() as f: params = yaml.safe_load(f) return ( isinstance(params, dict) and 'remote_path' in params and params['remote_path'] == remote_path) def get_versions(host, *args, match=_match_metainfo): pdir = Path(WORKDIR).absolute() / host return sorted([ x.name for x in pdir.glob('*') if x.is_dir() and match(x, *args)]) def find_latest_version(host, container, conf_path): return get_versions(host, container, conf_path)[-1] def find_latest_version_by_remote_path(host, remote_path): return get_versions( host, remote_path, match=_match_metainfo_by_remote_path)[-1] def download_file(host, remote_path, conf_path=None): if conf_path is None: conf_path = Path(remote_path).name dest = generate_local_path(host, conf_path) ansible_arg = f'src={remote_path} dest={dest} flat=yes' out = subprocess.check_output( ['ansible', host, '-m', 'fetch', '-a', ansible_arg]) host_1 = out.decode('utf-8').split("\n")[0].split()[0] logger.info(f'Downloading {remote_path} from {host_1} to {dest}') return dest def download_conf_file(host, container, conf_path, relative_to=CONF_RELATIVE): src = generate_remote_path(container, conf_path, relative_to) return download_file(host, src, conf_path) def create_conf_file(host, conf_path): dest = generate_local_path(host, conf_path) dest.parent.mkdir(parents=True, exist_ok=True) dest.touch() return dest def _to_backup(conf): return conf.parent / (conf.name + '.orig') def make_backup(conf, quiet=False): org = _to_backup(conf) if not quiet: logger.info(f'Copy {conf} {org}') shutil.copy2(conf, org) def make_metainfo(local_path, container, conf_path, relative_to=CONF_RELATIVE): params = { 'container': container, 'container_path': conf_path, 'remote_path': str(generate_remote_path(container, conf_path, relative_to)), 'version': list(local_path.parts)[-2], } with (local_path.parent / META_YML).open(mode='w') as f: yaml.safe_dump(params, stream=f, default_flow_style=False) def make_simple_metainfo(local_path, remote_path): params = { 
'remote_path': remote_path, 'version': list(local_path.parts)[-2], } with (local_path.parent / META_YML).open(mode='w') as f: yaml.safe_dump(params, stream=f, default_flow_style=False) def generate_edit_link(conf): nb_conf = list(notebookapp.list_running_servers())[0] p = (Path(nb_conf['base_url']) / 'edit' / conf.absolute().relative_to(nb_conf['notebook_dir'])) return HTML(f'<a href={p} target="_blank">{p.name}</a>') def show_diff(path_a, path_b): lines_a = [] lines_b = [] with path_a.open() as f: lines_a = f.readlines() with path_b.open() as f: lines_b = f.readlines() diff = list(unified_diff( lines_a, lines_b, fromfile=path_a.name, tofile=path_b.name)) sys.stdout.writelines(diff) return len(diff) def upload_conf_file(src, host, container, conf_path, relative_to=CONF_RELATIVE): dest = generate_remote_path(container, conf_path, relative_to) ansible_arg = f'mkdir -p {dest.parent}' subprocess.run( ['ansible', host, '-a', ansible_arg]) ansible_arg = f'dest={dest} src={src} backup=yes' out = subprocess.check_output( ['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg]) host_1 = out.decode('utf-8').split("\n")[0].split()[0] logger.info(f'Uploading {dest} from {src} to {host_1}') def restart_container(host, container): cmd = f'chdir={MOODLE_DIR} docker-compose restart {container}' logger.info(f'Restart container {container}') subprocess.check_call(['ansible', host, '-a', cmd]) def fetch_conf(host, container, conf_path, relative_to=CONF_RELATIVE, create=False): local_path = download_conf_file(host, container, conf_path, relative_to) make_backup(local_path) make_metainfo(local_path, container, conf_path, relative_to) return generate_edit_link(local_path) def create_conf(host, container, conf_path, relative_to=CONF_RELATIVE, create=False): local_path = create_conf_file(host, conf_path) make_backup(local_path, quiet=True) make_metainfo(local_path, container, conf_path, relative_to) return generate_edit_link(local_path) def apply_conf(host, container, conf_path, relative_to=CONF_RELATIVE, version=None, restart=True): diff = show_local_conf_diff(host, container, conf_path, version) local_path = get_local_path(host, container, conf_path, version) upload_conf_file(local_path, host, container, conf_path, relative_to) if restart: restart_container(host, container) def revert_conf(host, container, conf_path, relative_to=CONF_RELATIVE, version=None): local_path = get_local_path(host, container, conf_path, version) backup_path = _to_backup(local_path) show_diff(local_path, backup_path) upload_conf_file(backup_path, host, container, conf_path, relative_to) restart_container(host, container) local_path.rename(local_path.parent / (local_path.name + '.revert')) def show_local_conf(host, container, conf_path, relative_to=CONF_RELATIVE, version=None): conf = get_local_path(host, container, conf_path, version) with conf.open() as f: print(f.read()) def edit_local_conf(host, container, conf_path, relative_to=CONF_RELATIVE, version=None): conf = get_local_path(host, container, conf_path, version) return generate_edit_link(conf) def show_local_conf_diff(host, container, conf_path, version=None): local_path = get_local_path(host, container, conf_path, version) show_diff(_to_backup(local_path), local_path) def save_shibboleth_part(conf_path): with conf_path.open() as f: data = yaml.safe_load(f) params = {} if 'shibboleth' in data['services']: params['shibboleth_container'] = yaml.safe_dump( data['services']['shibboleth']) vars_path = conf_path.parent / 'extra_vars.yml' with vars_path.open(mode='w') as f: 
yaml.safe_dump(params, f) return vars_path def init_shibboleth_part(conf_dir, hostname, volumes): shibboleth_volumes = ['/sys/fs/cgroup:/sys/fs/cgroup'] shibboleth_volumes.extend(volumes) params = { 'shibboleth_container': yaml.safe_dump({ 'image': 'harbor.vcloud.nii.ac.jp/vcp/moodle:shibboleth-3.0.4', 'privileged': True, 'ports': ['443:443'], 'volumes': shibboleth_volumes, 'container_name': 'shibboleth', 'hostname': hostname, }), } vars_path = conf_dir / 'shibboleth.yml' with vars_path.open(mode='w') as f: yaml.safe_dump(params, f) return vars_path def setup_shibboleth_part(local_path, **params): if params is None or len(params) == 0: return save_shibboleth_part(local_path) else: return init_shibboleth_part(local_path.parent, **params) def generate_docker_compose(host, conf_path, extra_vars, extra_vars_file): template = 'template/docker/compose/docker-compose.yml' ansible_arg = f'src={template} dest={conf_path.parent}/' env = dict([(x, os.environ[x]) for x in ENV_INHERIT]) args = ['ansible', host, '-m', 'template', '-c', 'local', '-a', ansible_arg] for k, v in extra_vars.items(): args.extend(['-e', f'{k}={v}']) for x in extra_vars_file: args.extend(['-e', f'@{str(x)}']) subprocess.run(args=args, env=env, check=True) def update_docker_compose(host, extra_vars={}, shibboleth_params={}): remote_path = MOODLE_DIR + '/docker-compose.yml' local_path = download_file(host, remote_path) make_backup(local_path) make_simple_metainfo(local_path, remote_path) shibboleth_vars = setup_shibboleth_part(local_path, **shibboleth_params) generate_docker_compose(host, local_path, extra_vars, [shibboleth_vars]) show_diff(_to_backup(local_path), local_path) return generate_edit_link(local_path) def append_shibboleth_container(host, moodle_url, volumes=[], extra_vars={}): hostname = urlparse(moodle_url).netloc return update_docker_compose( host, extra_vars, shibboleth_params={'hostname': hostname, 'volumes': volumes}, ) def upload_docker_compose(host, version=None, apply=False): remote_path = MOODLE_DIR + '/docker-compose.yml' if version is None: version = find_latest_version_by_remote_path(host, remote_path) local_path = ( Path(WORKDIR).absolute() / host / version / 'docker-compose.yml') ansible_arg = f'dest={remote_path} src={local_path} backup=yes' out = subprocess.check_output( ['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg]) host_1 = out.decode('utf-8').split("\n")[0].split()[0] logger.info(f'Uploading {remote_path} from {local_path} to {host_1}') if not apply: return ansible_arg = f'chdir=/opt/moodle docker-compose up -d --remove-orphans' args = ['ansible', host, '-a', ansible_arg] logger.info('Apply the changes in docker-compose.yml.') subprocess.run(args=args, check=True) def generate_proxy_conf(host, conf_path, extra_vars): template = 'template/docker/compose/moodle-proxy.conf.template' ansible_arg = f'src={template} dest={conf_path.parent}/moodle-proxy.conf' env = dict([(x, os.environ[x]) for x in ENV_INHERIT]) args = [ 'ansible', host, '-m', 'template', '-c', 'local', '-a', ansible_arg] for k, v in extra_vars.items(): args.extend(['-e', f'{k}={v}']) subprocess.run(args=args, env=env, check=True) def update_proxy_conf(host, extra_vars={}): conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf') container = 'proxy' link = fetch_conf(host, container, str(conf_path), str(conf_path.parent)) version = find_latest_version(host, container, str(conf_path)) local_path = generate_local_path(host, conf_path, version) generate_proxy_conf(host, local_path, extra_vars) show_local_conf_diff(host, 
container, conf_path, version) return link def apply_proxy_conf(host, version=None, restart=True): conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf') apply_conf(host, 'proxy', str(conf_path), str(conf_path.parent), version, restart)
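A hedged sketch of the `show_diff` helper defined above; the two paths are placeholders for a fetched config file and its `.orig` backup:

from pathlib import Path

changed = show_diff(Path('edit/host1/v1/php.ini.orig'), Path('edit/host1/v1/php.ini'))
if changed == 0:
    print('no local edits yet')   # show_diff returns the number of unified-diff lines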
34.59322
79
0.682182
1,702
12,246
4.645711
0.13161
0.061717
0.0688
0.061085
0.543063
0.480713
0.45289
0.398761
0.310484
0.255343
0
0.003326
0.189695
12,246
353
80
34.691218
0.79351
0
0
0.286232
0
0.003623
0.127715
0.028336
0
0
0
0
0
1
0.130435
false
0
0.043478
0.014493
0.271739
0.003623
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a63b2be4d7b2116c7bb45a2e0a6f93a06e01c5e
959
py
Python
other/minimum_edit_distance.py
newvicklee/nlp_algorithms
d2812398d96d345dcb50970bae6ebbf666ea5380
[ "MIT" ]
null
null
null
other/minimum_edit_distance.py
newvicklee/nlp_algorithms
d2812398d96d345dcb50970bae6ebbf666ea5380
[ "MIT" ]
null
null
null
other/minimum_edit_distance.py
newvicklee/nlp_algorithms
d2812398d96d345dcb50970bae6ebbf666ea5380
[ "MIT" ]
null
null
null
""" Minimum edit distance computes the cost it takes to get from one string to another string. This implementation uses the Levenshtein distance with a cost of 1 for insertions or deletions and a cost of 2 for substitutions. Resource: https://en.wikipedia.org/wiki/Edit_distance For example, getting from "intention" to "execution" is a cost of 8. minimum_edit_distance("intention", "execution") # 8 """ def minimum_edit_distance(source, target): n = len(source) m = len(target) D = {} # Initialization for i in range(0, n+1): D[i,0] = i for j in range(0, m+1): D[0,j] = j for i in range(1, n+1): for j in range(1, m+1): if source[i-1] == target[j-1]: D[i,j] = D[i-1, j-1] else: D[i,j] = min( D[i-1, j] + 1, D[i, j-1] + 1, D[i-1, j-1] + 2 ) return D[n-1, m-1]
28.205882
129
0.535975
156
959
3.262821
0.371795
0.027505
0.023576
0.023576
0.045187
0
0
0
0
0
0
0.042587
0.338895
959
33
130
29.060606
0.760252
0.432742
0
0
0
0
0
0
0
0
0
0
0
1
0.052632
false
0
0
0
0.105263
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a65447ee836106ce8cee612e580a711dcd38121
7,219
py
Python
varifier/dnadiff.py
iqbal-lab-org/varifier
718a787fd8490ea33a79b5095884e66e12106399
[ "MIT" ]
11
2020-04-06T11:22:50.000Z
2021-11-12T18:09:41.000Z
varifier/dnadiff.py
martinghunt/varifier
9f05477b5e48e96264c392fbd14ca98d1ed86e48
[ "MIT" ]
17
2020-04-01T15:19:55.000Z
2021-11-12T05:07:01.000Z
varifier/dnadiff.py
martinghunt/varifier
9f05477b5e48e96264c392fbd14ca98d1ed86e48
[ "MIT" ]
3
2020-04-01T10:41:27.000Z
2020-08-05T06:27:21.000Z
from operator import attrgetter import logging import os import shutil import subprocess import pyfastaq import pymummer from cluster_vcf_records import vcf_record from varifier import utils # We only want the .snps file from the dnadiff script from MUMmer. From reading # the docs inspecting that script, we need to run these commands: # # nucmer --maxmatch --delta out.delta ref.fasta query.fasta # delta-filter -1 out.delta > out.1delta # show-snps -rlTHC out.1delta > out.snps # # This is instead of just running show-snps, which runs several other commands # in addition to making the snps file. def _run_dnadiff_one_split(ref_fasta, query_fasta, outfile, threads=1, maxmatch=True): delta = f"{outfile}.tmp.delta" delta_1 = f"{outfile}.tmp.1delta" subprocess.check_output(f"rm -f {delta} {delta_1}", shell=True) maxmatch_opt = "--maxmatch" if maxmatch else "" commands = [ f"nucmer --threads {threads} {maxmatch_opt} --delta {delta} {ref_fasta} {query_fasta}", f"delta-filter -1 {delta} > {delta_1}", f"show-snps -rlTHC {delta_1} > {outfile}", ] for command in commands: logging.info("Start run command: " + command) subprocess.check_output(command, shell=True) logging.info("Finish run command: " + command) os.unlink(delta) os.unlink(delta_1) def _run_dnadiff( ref_fasta, query_fasta, outfile, split_query=False, debug=False, threads=1, maxmatch=True, ): if not split_query: _run_dnadiff_one_split( ref_fasta, query_fasta, outfile, threads=threads, maxmatch=maxmatch ) else: tmp_snp_files = [] seq_reader = pyfastaq.sequences.file_reader(query_fasta) for seq in seq_reader: prefix = f"{outfile}.tmp.split.{len(tmp_snp_files)}" tmp_fasta = f"{prefix}.fasta" with open(tmp_fasta, "w") as f: print(seq, file=f) snp_file = f"{prefix}.snps" _run_dnadiff_one_split( ref_fasta, tmp_fasta, snp_file, threads=threads, maxmatch=maxmatch ) os.unlink(tmp_fasta) tmp_snp_files.append(snp_file) with open(outfile, "wb") as f_out: for snp_file in tmp_snp_files: with open(snp_file, "rb") as f_in: shutil.copyfileobj(f_in, f_out) if not debug: os.unlink(snp_file) def _snps_file_to_vcf(snps_file, query_fasta, outfile): """Loads the .snps file made by dnadiff. query_fasta = fasta file of query sequences. Writes a new VCF file unmerged records.""" vcf_records = {} variants = pymummer.snp_file.get_all_variants(snps_file) query_seqs = utils.file_to_dict_of_seqs(query_fasta) for variant in variants: # If the variant is reversed, it means that either the ref or query had to be # reverse complemented when aligned by mummer. Need to do the appropriate # reverse (complement) fixes so the VCF has the correct REF and ALT sequences if variant.reverse: qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base) qry_seq.revcomp() variant.qry_base = "".join(reversed(qry_seq.seq)) ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base) ref_seq.revcomp() variant.ref_base = ref_seq.seq if variant.var_type == pymummer.variant.SNP: new_record = vcf_record.VcfRecord( "\t".join( [ variant.qry_name, str(variant.qry_start + 1), ".", variant.qry_base, variant.ref_base, ".", ".", "SVTYPE=DNADIFF_SNP", "GT", "1/1", ] ) ) elif variant.var_type == pymummer.variant.DEL: # The query has sequence missing, compared to the # reference. We're making VCF records w.r.t. the # query, so this is an insertion. So need to # get the nucleotide before the insertion as well. 
new_record = vcf_record.VcfRecord( "\t".join( [ variant.qry_name, str(variant.qry_start + 1), ".", query_seqs[variant.qry_name][variant.qry_start], query_seqs[variant.qry_name][variant.qry_start] + variant.ref_base, ".", ".", "SVTYPE=DNADIFF_INS", "GT", "1/1", ] ) ) elif variant.var_type == pymummer.variant.INS: # The ref has sequence missing, compared to the # query. We're making VCF records w.r.t. the # query, so this is a deletion. So need to # get the nucleotide before the deletion as well. new_record = vcf_record.VcfRecord( "\t".join( [ variant.qry_name, str(variant.qry_start), ".", query_seqs[variant.qry_name][variant.qry_start - 1] + variant.qry_base, query_seqs[variant.qry_name][variant.qry_start - 1], ".", ".", "SVTYPE=DNADIFF_DEL", "GT", "1/1", ] ) ) else: raise Exception("Unknown variant type: " + str(variant)) assert ( new_record.REF == query_seqs[new_record.CHROM][ new_record.POS : new_record.POS + len(new_record.REF) ] ) if new_record.CHROM not in vcf_records: vcf_records[new_record.CHROM] = [] vcf_records[new_record.CHROM].append(new_record) for vcf_list in vcf_records.values(): vcf_list.sort(key=attrgetter("POS")) with open(outfile, "w") as f: print("##fileformat=VCFv4.2", file=f) for seq in query_seqs.values(): print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f) print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f) for key, vcf_list in sorted(vcf_records.items()): for record in vcf_list: print(record, file=f) def make_truth_vcf( ref_fasta, truth_fasta, outfile, debug=False, split_ref=False, threads=1, maxmatch=True, ): snps_file = f"{outfile}.tmp.snps" _run_dnadiff( truth_fasta, ref_fasta, snps_file, split_query=split_ref, debug=debug, threads=threads, maxmatch=maxmatch, ) _snps_file_to_vcf(snps_file, ref_fasta, outfile) if not debug: os.unlink(snps_file)
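A hedged invocation sketch of the top-level entry point above; it assumes MUMmer's nucmer, delta-filter and show-snps are on PATH, and the FASTA/VCF paths are placeholders:

make_truth_vcf(
    "ref.fasta",     # assembly the output VCF coordinates refer to
    "truth.fasta",   # trusted sequence used to call the differences
    "truth.vcf",
    threads=4,
)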
34.37619
95
0.543289
848
7,219
4.430425
0.222877
0.047911
0.026085
0.023955
0.325526
0.240085
0.187916
0.180463
0.156774
0.126431
0
0.005423
0.361407
7,219
209
96
34.54067
0.809544
0.15293
0
0.283133
0
0
0.097008
0.023183
0
0
0
0
0.006024
1
0.024096
false
0
0.054217
0
0.078313
0.03012
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6616a10563e4ebc6f0a75abad1fbf54a72a196
2,776
py
Python
queryfilter/datetimefilter.py
iCHEF/queryfilter
0ae4faf525e162d2720d328b96fa179d68277f1e
[ "Apache-2.0" ]
4
2018-05-11T18:07:32.000Z
2019-07-30T13:38:49.000Z
queryfilter/datetimefilter.py
iCHEF/queryfilter
0ae4faf525e162d2720d328b96fa179d68277f1e
[ "Apache-2.0" ]
6
2018-02-26T04:46:36.000Z
2019-04-10T06:17:12.000Z
queryfilter/datetimefilter.py
iCHEF/queryfilter
0ae4faf525e162d2720d328b96fa179d68277f1e
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import

import datetime

from dateutil import parser
import pytz

from .base import FieldFilter, DictFilterMixin, DjangoQueryFilterMixin
from .queryfilter import QueryFilter

WHOLE_DAY = datetime.timedelta(days=1)
ONE_SECOND = datetime.timedelta(seconds=1)


@QueryFilter.register_type_condition('datetime')
class DatetimeRangeFilter(DjangoQueryFilterMixin, DictFilterMixin,
                          FieldFilter):

    @property
    def start(self):
        return get_start(self.filter_args.get("start"))

    @property
    def end(self):
        end_datetime = get_end(self.filter_args.get("end"))
        if not end_datetime:
            return None
        if _has_no_time_info(end_datetime):
            end_datetime = end_datetime + WHOLE_DAY - ONE_SECOND
        return end_datetime

    def on_dicts(self, dicts):

        def in_range(datum):
            datetime_string = self.get(datum, self.field_name)
            if isinstance(datetime_string, datetime.datetime):
                to_compare = datetime_string
            else:
                to_compare = parse(datetime_string)

            if not self.start and not self.end:
                return False
            if self.start and (to_compare < self.start):
                return False
            if self.end and (self.end < to_compare):
                return False
            return True

        return list(filter(in_range, dicts))

    @property
    def query_params(self):
        if not any((self.start, self.end)):
            return None
        query_params = dict()
        if self.start:
            query_params["{}__gte".format(self.field_name)] = self.start
        if self.end:
            query_params["{}__lte".format(self.field_name)] = self.end
        return query_params

    def _do_django_query(self, queryset):
        query_params = self.query_params
        if query_params:
            return queryset.filter(**query_params)
        else:
            return queryset.none()


min_datetime = datetime.datetime.min.replace(tzinfo=pytz.utc)
max_datetime = datetime.datetime.max.replace(tzinfo=pytz.utc)


def get_start(start_date_str):
    if not start_date_str:
        return None
    return parse(start_date_str)


def get_end(end_date_str):
    if not end_date_str:
        return None
    return parse(end_date_str)


def parse(datetime_string):
    return make_time_aware(parser.parse(datetime_string))


def make_time_aware(datetime_data):
    if not datetime_data.tzinfo:
        datetime_data = datetime_data.replace(tzinfo=pytz.utc)
    return datetime_data


def _has_no_time_info(value):
    return value.hour == 0 and \
        value.minute == 0 and \
        value.second == 0 and \
        value.microsecond == 0
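A small illustration of the module-level helpers above, assuming they are imported from this module; no Django is needed for this path:

start = get_start("2018-05-01")               # parsed and made timezone-aware (UTC)
end = get_end("2018-05-01")                   # still midnight here; padding happens in the `end` property
print(start.tzinfo, _has_no_time_info(end))   # UTC True -> the filter extends end by WHOLE_DAY - ONE_SECOND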
25.46789
72
0.648055
342
2,776
5.008772
0.245614
0.057793
0.022767
0.035026
0.059545
0.032691
0
0
0
0
0
0.002966
0.271254
2,776
108
73
25.703704
0.843796
0
0
0.162162
0
0
0.010807
0
0
0
0
0
0
1
0.148649
false
0
0.081081
0.040541
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6634c8b3d57a247c912406564142afedbbeba0
13,829
py
Python
built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
12
2020-12-13T08:34:24.000Z
2022-03-20T15:17:17.000Z
built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
3
2021-03-31T20:15:40.000Z
2022-02-09T23:50:46.000Z
built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
2
2021-07-10T12:40:46.000Z
2021-12-17T07:55:15.000Z
# -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the MIT License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # MIT License for more details. """Import all torch operators.""" import torch.nn.functional as F import torch.nn as nn import torch from vega.search_space.networks.network_factory import NetworkFactory from vega.search_space.networks.net_utils import NetTypes from vega.search_space.networks.pytorch.utils.anchor_utils.anchor_target import AnchorTarget from vega.search_space.networks.pytorch.utils.bbox_utils.anchor_generator import AnchorGenerator from vega.core.common.config import Config from functools import partial import numpy as np from six.moves import map, zip from vega.search_space.networks.pytorch.losses.reduce_loss import weighted_loss @NetworkFactory.register(NetTypes.Operator) class RpnClsLossInput(nn.Module): """Rpn input.""" def __init__(self): super(RpnClsLossInput, self).__init__() def forward(self, x): """Get cls score and bbox preds.""" cls_scores = x[0] bbox_preds = x[1] return cls_scores, bbox_preds @NetworkFactory.register(NetTypes.Operator) class RpnLossInput(nn.Module): """Rpn loss input.""" def __init__(self): super(RpnLossInput, self).__init__() def forward(self, x): """Get cls score.""" cls_scores = x[2][0] bbox_preds = x[2][1] gt_bboxes = x[0]['gt_bboxes'].cuda() img_metas = [x[0]['img_meta']] gt_bboxes_ignore = x[0]['gt_bboxes_ignore'].cuda() return cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore @NetworkFactory.register(NetTypes.Operator) class AnchorTargetOp(nn.Module): """Anchor Target.""" def __init__(self, target_means=None, target_stds=None, num_classes=2, use_sigmoid_cls=False, cfg=None, sampling=True): self.target_means = target_means or (.0, .0, .0, .0) self.target_stds = target_stds or (1.0, 1.0, 1.0, 1.0) self.label_channels = num_classes if use_sigmoid_cls else 1 self.cfg = Config({'assigner': {'name': 'MaxIoUAllNegAssigner', 'pos_iou_thr': 0.7, 'neg_iou_thr': tuple([-1, 0.3]), 'min_pos_iou': 0.3, 'ignore_iof_thr': 0.5}, 'sampler': {'name': 'RandomSampler', 'num': 256, 'pos_fraction': 0.5, 'neg_pos_ub': -1, 'add_gt_as_proposals': False}, 'allowed_border': 0, 'pos_weight': -1, 'debug': False}) self.sampling = sampling super(AnchorTargetOp, self).__init__() def forward(self, x): """Create X=(anchor_list,valid_flag_list,gt_bboxes,img_metas,).""" anchor_list, valid_flag_list, original_anchors, gt_bboxes, img_metas, gt_bboxes_ignore = x # out=(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos,num_total_neg). 
return AnchorTarget(anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means, self.target_stds, self.cfg, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=None, label_channels=self.label_channels, sampling=self.sampling) @NetworkFactory.register(NetTypes.Operator) class Anchors(nn.Module): """Get anchors according to feature map sizes.""" def __init__(self, anchor_base_sizes_cfg=None, anchor_scales=None, anchor_ratios=None, anchor_strides=None): self.anchor_base_sizes_cfg = anchor_base_sizes_cfg self.anchor_scales = anchor_scales or [8, 16, 32] self.anchor_ratios = anchor_ratios or [0.5, 1.0, 2.0] self.anchor_strides = anchor_strides or [4, 8, 16, 32, 64] self.anchor_base_sizes = list( self.anchor_strides) if self.anchor_base_sizes_cfg is None else self.anchor_base_sizes_cfg super(Anchors, self).__init__() def forward(self, x): """Create anchor.""" cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore = x featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] anchor_generators = [] for anchor_base in self.anchor_base_sizes: anchor_generators.append(AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios)) num_imgs = len(img_metas) num_levels = len(featmap_sizes) multi_level_anchors = [] for i in range(num_levels): anchors = anchor_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i]) multi_level_anchors.append(anchors) anchor_list = [multi_level_anchors for _ in range(num_imgs)] valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = [] for i in range(num_levels): anchor_stride = self.anchor_strides[i] feat_h, feat_w = featmap_sizes[i] h, w, _ = img_meta['pad_shape'] valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) flags = anchor_generators[i].valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w)) multi_level_flags.append(flags) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list, multi_level_anchors, gt_bboxes, img_metas, gt_bboxes_ignore def multi_apply(func, *args, **kwargs): """Multi apply. 
:param func: function :param args: args of function :return: result """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) @NetworkFactory.register(NetTypes.Operator) class RpnClsLoss(nn.Module): """Rpn Class Loss.""" def __init__(self, out_channels=2): super(RpnClsLoss, self).__init__() self.loss_cls = CustomCrossEntropyLoss() self.loss_bbox = CustomSmoothL1Loss() self.out_channels = out_channels def forward(self, x): """Get x.""" (cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_pos, num_total_neg, num_total_samples) = x losses_cls, losses_bbox = multi_apply(self.loss, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_samples=num_total_samples) return losses_cls, losses_bbox def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Get loss.""" labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.out_channels) loss_cls = self.loss_cls(cls_score, labels, label_weights, avg_factor=num_total_samples) bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) loss_bbox = self.loss_bbox(bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox @NetworkFactory.register(NetTypes.Operator) class CustomCrossEntropyLoss(nn.Module): """Cross Entropy Loss.""" def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', loss_weight=1.0): """Init Cross Entropy loss. :param desc: config dict """ super(CustomCrossEntropyLoss, self).__init__() self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.reduction = reduction self.loss_weight = loss_weight if self.use_sigmoid: self.loss_function = binary_cross_entropy elif self.use_mask: self.loss_function = mask_cross_entropy else: self.loss_function = cross_entropy def forward(self, cls_score, label, weight, avg_factor, reduction_override=None, **kwargs): """Forward compute.""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_cls @NetworkFactory.register(NetTypes.Operator) class CustomSmoothL1Loss(nn.Module): """Smooth L1 Loss.""" def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): """Init smooth l1 loss.""" super(CustomSmoothL1Loss, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward compute. :param pred: predict :param target: target :param weight: weight :param avg_factor: avg factor :param reduction_override: reduce override :return: loss """ reduction = ( reduction_override if reduction_override else self.reduction) if target.numel() > 0: loss_bbox = self.loss_weight * smooth_l1_loss( pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox else: return torch.FloatTensor([0.0]).cuda() @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): """Smooth l1 loss. 
:param pred: predict :param target: target :param beta: beta :return: loss """ assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta) return loss def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None): """Cross entropy losses. :param pred: predict result :param label: gt label :param weight: weight :param reduction: reduce function :param avg_factor: avg factor :return: loss """ loss = F.cross_entropy(pred, label, reduction='none') if weight is not None: weight = weight.float() loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss def _expand_binary_labels(labels, label_weights, label_channels): """Expand binary labels. :param labels: labels :param label_weights: label weights :param label_channels: label channels :return: binary label and label weights """ bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 if label_weights is None: bin_label_weights = None else: bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size(0), label_channels) return bin_labels, bin_label_weights def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None): """Binary cross entropy loss. :param pred: predict result :param label: gt label :param weight: weight :param reduction: reduce function :param avg_factor: avg factor :return: loss """ if pred.dim() != label.dim(): label, weight = _expand_binary_labels(label, weight, pred.size(-1)) if weight is not None: weight = weight.float() loss = F.binary_cross_entropy_with_logits( pred, label.float(), weight, reduction='none') loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor) return loss def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None): """Mask cross entropy loss. :param pred: predict result :param target: target :param label: gt label :param reduction: reduce function :param avg_factor: avg factor :return: loss """ assert reduction == 'mean' and avg_factor is None num_rois = pred.size()[0] inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) pred_slice = pred[inds, label].squeeze(1) return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None] def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Weight reduce loss. :param loss: losses :param weight: weight :param reduction: reduce function :param avg_factor: avg factor :return: loss """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) else: if reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def reduce_loss(loss, reduction): """Reduce loss compute. :param loss: losses :param reduction: reduce funtion :return: loss """ reduction_function = F._Reduction.get_enum(reduction) if reduction_function == 0: return loss elif reduction_function == 1: return loss.mean() elif reduction_function == 2: return loss.sum()
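The `multi_apply` helper defined above is framework-independent, so a tiny self-contained check is possible without the vega/torch setup; the `scale` function below is invented for illustration:

def scale(x, y, factor=1):
    return x * factor, y * factor

xs, ys = multi_apply(scale, [1, 2, 3], [4, 5, 6], factor=10)
print(xs, ys)   # [10, 20, 30] [40, 50, 60]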
37.991758
116
0.653048
1,787
13,829
4.797426
0.15277
0.031494
0.012598
0.018897
0.356468
0.262685
0.232124
0.1829
0.156421
0.0925
0
0.01184
0.242678
13,829
363
117
38.096419
0.806741
0.157857
0
0.175115
0
0
0.028813
0
0
0
0
0
0.018433
1
0.105991
false
0
0.0553
0
0.281106
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6637af877e66a30d055aa9bfab27307de91c10
5,292
py
Python
scrapy/http/request/__init__.py
joybhallaa/scrapy
e4750f2fbdacbeb7a20ae7c6b13bba3fb0f7ad54
[ "BSD-3-Clause" ]
1
2020-04-18T16:48:49.000Z
2020-04-18T16:48:49.000Z
scrapy/http/request/__init__.py
Venfox/scrapy
cf39602c3038d576e14c20a2ac22f88006deb63b
[ "BSD-3-Clause" ]
null
null
null
scrapy/http/request/__init__.py
Venfox/scrapy
cf39602c3038d576e14c20a2ac22f88006deb63b
[ "BSD-3-Clause" ]
null
null
null
""" This module implements the Request class which is used to represent HTTP requests in Scrapy. See documentation in docs/topics/request-response.rst """ from w3lib.url import safe_url_string from scrapy.http.headers import Headers from scrapy.utils.python import to_bytes from scrapy.utils.trackref import object_ref from scrapy.utils.url import escape_ajax from scrapy.http.common import obsolete_setter from scrapy.utils.curl import curl_to_request_kwargs class Request(object_ref): def __init__(self, url, callback=None, method='GET', headers=None, body=None, cookies=None, meta=None, encoding='utf-8', priority=0, dont_filter=False, errback=None, flags=None, cb_kwargs=None): self._encoding = encoding # this one has to be set first self.method = str(method).upper() self._set_url(url) self._set_body(body) assert isinstance(priority, int), "Request priority not an integer: %r" % priority self.priority = priority if callback is not None and not callable(callback): raise TypeError('callback must be a callable, got %s' % type(callback).__name__) if errback is not None and not callable(errback): raise TypeError('errback must be a callable, got %s' % type(errback).__name__) self.callback = callback self.errback = errback self.cookies = cookies or {} self.headers = Headers(headers or {}, encoding=encoding) self.dont_filter = dont_filter self._meta = dict(meta) if meta else None self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None self.flags = [] if flags is None else list(flags) @property def cb_kwargs(self): if self._cb_kwargs is None: self._cb_kwargs = {} return self._cb_kwargs @property def meta(self): if self._meta is None: self._meta = {} return self._meta def _get_url(self): return self._url def _set_url(self, url): if not isinstance(url, str): raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__) s = safe_url_string(url, self.encoding) self._url = escape_ajax(s) if ('://' not in self._url) and (not self._url.startswith('data:')): raise ValueError('Missing scheme in request url: %s' % self._url) url = property(_get_url, obsolete_setter(_set_url, 'url')) def _get_body(self): return self._body def _set_body(self, body): if body is None: self._body = b'' else: self._body = to_bytes(body, self.encoding) body = property(_get_body, obsolete_setter(_set_body, 'body')) @property def encoding(self): return self._encoding def __str__(self): return "<%s %s>" % (self.method, self.url) __repr__ = __str__ def copy(self): """Return a copy of this Request""" return self.replace() def replace(self, *args, **kwargs): """Create a new Request with the same attributes except for those given new values. """ for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags', 'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']: kwargs.setdefault(x, getattr(self, x)) cls = kwargs.pop('cls', self.__class__) return cls(*args, **kwargs) @classmethod def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs): """Create a Request object from a string containing a `cURL <https://curl.haxx.se/>`_ command. It populates the HTTP method, the URL, the headers, the cookies and the body. It accepts the same arguments as the :class:`Request` class, taking preference and overriding the values of the same arguments contained in the cURL command. Unrecognized options are ignored by default. To raise an error when finding unknown options call this method by passing ``ignore_unknown_options=False``. .. 
caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request` subclasses, such as :class:`~scrapy.http.JSONRequest`, or :class:`~scrapy.http.XmlRpcRequest`, as well as having :ref:`downloader middlewares <topics-downloader-middleware>` and :ref:`spider middlewares <topics-spider-middleware>` enabled, such as :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`, :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`, or :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`, may modify the :class:`~scrapy.http.Request` object. To translate a cURL command into a Scrapy request, you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_. """ request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options) request_kwargs.update(kwargs) return cls(**request_kwargs)
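A brief usage sketch for the Request class above (not part of the original file); the URL, callback name, and cURL string are illustrative only.

from scrapy import Request  # public import path for the class defined above


def parse_page(response):
    # hypothetical callback -- any callable that accepts a response works
    pass


req = Request("https://example.com/items?page=1", callback=parse_page, meta={"depth": 1})

# replace() copies every attribute except the ones overridden here
req2 = req.replace(url="https://example.com/items?page=2", priority=10)

# from_curl() builds a Request from a cURL command string
curl_req = Request.from_curl("curl 'https://example.com/api' -H 'Accept: application/json'")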
38.071942
102
0.634732
655
5,292
4.949618
0.277863
0.022209
0.018507
0.011721
0.047502
0.028378
0.014189
0
0
0
0
0.001287
0.265873
5,292
138
103
38.347826
0.833205
0.311602
0
0.039474
0
0
0.087771
0
0
0
0
0
0.013158
1
0.157895
false
0
0.092105
0.052632
0.421053
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6664a131eebc11f4bbd4774aef93f20aa62a4d
7,261
py
Python
game.py
akaeme/BlackJackBot
04970107202a24059f8da933233fba7df9f3a0ef
[ "MIT" ]
null
null
null
game.py
akaeme/BlackJackBot
04970107202a24059f8da933233fba7df9f3a0ef
[ "MIT" ]
null
null
null
game.py
akaeme/BlackJackBot
04970107202a24059f8da933233fba7df9f3a0ef
[ "MIT" ]
null
null
null
#encoding: utf8 __author__ = 'Diogo Gomes' __email__ = '[email protected]' __license__ = "GPL" __version__ = "0.1" import copy import card from shoe import Shoe from dealer import Dealer from player import Player BET_MULTIPLIER = 2 class Game(object): class Rules(): def __init__(self, shoe_size=4, min_bet=1, max_bet=10): self.shoe_size = shoe_size self.min_bet = min_bet self.max_bet = max_bet self.bet_multiplier = BET_MULTIPLIER def __str__(self): return "RULES\tMin bet: {}, Max bet: {}, Shoe size: {}, Bet multiplier: {}".format(self.min_bet, self.max_bet, self.shoe_size, self.bet_multiplier) class PlayerState(): def __init__(self, p): self.player = p self.bet = 0 self.hand = [] self.bust = False self.done = False self.watch = False def copy(self): return copy.deepcopy(self) def __str__(self): if isinstance(self.player, Dealer): return "{}".format(self.hand) return "{} ({}€)".format(self.hand, self.bet) def __repr__(self): return "{}".format(self.player.name) def hide_card(self): h = self.copy() h.hand = h.hand[1:] return h def want_to_play(self, rules): return self.player.want_to_play(rules) def take_bet(self, state, rules): bet = 0 while (bet!=self.bet and self.bet!=0) or not (rules.min_bet <= bet <= rules.max_bet) : #bets can't be 0 and double down means double down bet = self.player.bet(state[0].hide_card(), state[1:]) self.bet += bet def __init__(self, players, shoe_size=4, debug=False, verbose=True, min_bet=1, max_bet=10, shoe=None): if verbose: # print(chr(27) + "[2J") print("-"*80) self.verbose = verbose self.debug = debug self.rules = self.Rules(shoe_size=shoe_size, min_bet=min_bet, max_bet=max_bet) self.shoe = Shoe(shoe_size) if shoe != None: self.shoe = shoe self.shoe.shuffle() self.state = [self.PlayerState(Dealer())] + [self.PlayerState(p) for p in players] self.done = False def str_players_hands(self): o = "" for p in self.state[1:]: o+="{!s:^45}".format(p) return o def str_players_names(self): o = "" for p in self.state[1:]: o+="{!s:^35}".format(p.player) return o def __str__(self): return (\ "{:^30}\n"\ "╔"+"═══════════════════════════════"*(len(self.state)-1)+"╗\n"\ "{!s:^45}\n"\ " \n"\ " \n"\ " \n"\ " \n"\ " \n"\ "{!s}\n"\ "╚"+"═══════════════════════════════"*(len(self.state)-1)+"╝\n"\ "{}\n"\ ).format(self.state[0].player.name, self.state[0].hand if self.done else (["**"]+self.state[0].hide_card().hand if len(self.state[0].hand) else []), self.str_players_hands(), self.str_players_names()) def deal(self, num): return self.shoe.deal_cards(1) def take_bets(self): if self.debug: print(self) for p in self.state[1:]: if p.want_to_play(self.rules): p.take_bet(self.state, self.rules) else: p.watch = True def loop(self): #deal initial cards self.state[0].hand += self.shoe.deal_cards(2) for p in self.state[1:]: if not p.watch: p.hand += self.shoe.deal_cards(2) turn = 0 if card.blackjack(self.state[0].hand): #if the dealer has blackjack there is no point in playing... self.done = True return [p for p in self.state[1:] if card.blackjack(p.hand)] #lets play while not self.done: turn += 1 hits = 0 for p in self.state[::-1]: if p.watch or p.bust or p.done or card.value(p.hand) == 21: #skip players watching, bust players, players who have double down and players who already have blackjack! 
continue if self.debug: print("TURN {}: {}".format(turn, p.player.name)) print(self) action = "" while action not in ["h", "s", "d", "u"]: if isinstance(p.player, Dealer): action = p.player.play(self.state[0], self.state[1:]) else: action = p.player.play(self.state[0].hide_card(), self.state[1:]) if action == "d" and turn != 1: print("YOU CAN'T DOUBLE DOWN!!! double down is only available on the 1st turn") action = "" if action == "u": p.watch = True continue if action == "d": p.take_bet(self.state,self.rules) p.done = True if action in ["h", "d"]: p.hand+=self.deal(1) hits +=1 if card.value(p.hand) >= 21: if card.value(p.hand) > 21: p.bust = True else: p.done = True #already has blackjack if isinstance(p.player, Dealer): self.done = True #game is over we already have a blackjack if hits == 0: self.done = True self.done = True return [p for p in self.state if not isinstance(p.player, Dealer) and #Dealer is not really a winner not card.blackjack(self.state[0].hand) and #If dealer gets blackjack no one wins not p.watch and #players watching can't win :) not p.bust and #bust players can't win :) (card.value(p.hand) >= card.value(self.state[0].hand) or self.state[0].bust) #winners have more points then the dealer or the dealer has gone bust ] def show_table(self): for p in self.state[1:]: p.player.show(self.state) def payback(self, winners): for p in self.state[1:]: if p.watch: #check if player surrendered if p.bet > 0: p.player.payback(-p.bet//2) #this means the player lost half his bet #skip watchers continue if p in winners and card.value(self.state[0].hand) == card.value(p.hand): p.player.payback(0) #bet is returned elif p in winners: p.player.payback(-p.bet + p.bet*BET_MULTIPLIER) else: p.player.payback(-p.bet) #this means the player lost def run(self): self.take_bets() winners = self.loop() self.show_table() self.payback(winners) if self.verbose: print(self) print("🏆 Winners: "+str(winners))
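A minimal player sketch, inferred from the methods Game calls on p.player (want_to_play, bet, play, show, payback); the real Player base class in player.py may differ, so treat this as an assumption rather than the project's API.

class StandingPlayer:
    """Hypothetical player: always bets the minimum and always stands."""
    name = "stand-bot"

    def want_to_play(self, rules):
        return True

    def bet(self, dealer_state, player_states):
        return 1  # minimum bet under the default rules

    def play(self, dealer_state, player_states):
        return "s"  # "h"=hit, "s"=stand, "d"=double down, "u"=surrender

    def show(self, state):
        pass  # no bookkeeping

    def payback(self, amount):
        pass  # ignore winnings/losses

# game = Game([StandingPlayer()], verbose=False)  # requires the card/shoe/dealer/player modules
# game.run()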
36.305
208
0.493596
928
7,261
3.826509
0.171336
0.076035
0.033793
0.025345
0.264714
0.152633
0.093495
0.050972
0.045621
0.032104
0
0.016751
0.375155
7,261
199
209
36.487437
0.750937
0.089657
0
0.27381
0
0
0.070258
0.009408
0
0
0
0
0
1
0.113095
false
0
0.029762
0.035714
0.238095
0.041667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a67bdcc24a12daa838689d0d299113ff13d2c1e
7,044
py
Python
lib/TWCManager/Status/HASSStatus.py
Saftwerk/TWCManager
9b17c063ada80fc159db82fe6e3ad8c4ca071a1a
[ "Unlicense" ]
1
2021-12-26T03:41:22.000Z
2021-12-26T03:41:22.000Z
lib/TWCManager/Status/HASSStatus.py
Saftwerk/TWCManager
9b17c063ada80fc159db82fe6e3ad8c4ca071a1a
[ "Unlicense" ]
null
null
null
lib/TWCManager/Status/HASSStatus.py
Saftwerk/TWCManager
9b17c063ada80fc159db82fe6e3ad8c4ca071a1a
[ "Unlicense" ]
null
null
null
# HomeAssistant Status Output # Publishes the provided sensor key and value pair to a HomeAssistant instance import logging import time from ww import f logger = logging.getLogger(__name__.rsplit(".")[-1]) class HASSStatus: import threading import requests apiKey = None config = None configConfig = None configHASS = None master = None msgRateInSeconds = 60 resendRateInSeconds = 3600 retryRateInSeconds = 60 msgQueue = {} status = False serverIP = None serverPort = 8123 useHttps = False timeout = 2 backgroundTasksLock = threading.Lock() backgroundTasksThread = None def __init__(self, master): self.config = master.config self.master = master try: self.configConfig = self.config["config"] except KeyError: self.configConfig = {} try: self.configHASS = self.config["status"]["HASS"] except KeyError: self.configHASS = {} self.status = self.configHASS.get("enabled", False) self.serverIP = self.configHASS.get("serverIP", None) self.serverPort = self.configHASS.get("serverPort", 8123) self.useHttps = self.configHASS.get("useHttps", False) self.apiKey = self.configHASS.get("apiKey", None) self.msgRateInSeconds = self.configHASS.get("msgRateInSeconds", 60) self.resendRateInSeconds = self.configHASS.get("resendRateInSeconds", 3600) self.retryRateInSeconds = self.configHASS.get("retryRateInSeconds", 60) # Unload if this module is disabled or misconfigured if ( (not self.status) or (not self.serverIP) or (int(self.serverPort) < 1) or (not self.apiKey) ): self.master.releaseModule("lib.TWCManager.Status", "HASSStatus") else: self.backgroundTasksThread = self.threading.Thread( target=self.background_task_thread, args=() ) self.backgroundTasksThread.daemon = True self.backgroundTasksThread.start() def getTwident(self, twcid): # Format TWCID nicely if len(twcid) == 2: return "%02X%02X" % (twcid[0], twcid[1]) else: return str(twcid.decode("utf-8")) def background_task_thread(self): while True: time.sleep(self.msgRateInSeconds) self.backgroundTasksLock.acquire() for msgKey in self.msgQueue: msg = self.msgQueue[msgKey] if msg.elapsingTime < time.time(): self.sendingStatusToHASS(msg) self.backgroundTasksLock.release() def getSensorName(self, twcid, key_underscore): return "sensor.twcmanager_" + str(self.getTwident(twcid)) + "_" + key_underscore def setStatus(self, twcid, key_underscore, key_camelcase, value, unit): self.backgroundTasksLock.acquire() sensor = self.getSensorName(twcid, key_underscore) if (sensor not in self.msgQueue) or (self.msgQueue[sensor].value != value): self.msgQueue[sensor] = HASSMessage( time.time(), sensor, twcid, key_underscore, key_camelcase, value, unit, ) self.backgroundTasksLock.release() def sendingStatusToHASS(self, msg): http = "http://" if not (self.useHttps) else "https://" url = http + self.serverIP + ":" + self.serverPort url = url + "/api/states/" + msg.sensor headers = { "Authorization": "Bearer " + self.apiKey, "content-type": "application/json", } try: logger.log( logging.INFO8, f( "Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value})." 
), ) devclass = "" if str.upper(msg.unit) in ["W", "A", "V", "KWH"]: devclass = "power" if len(msg.unit) > 0: self.requests.post( url, json={ "state": msg.value, "attributes": { "unit_of_measurement": msg.unit, "device_class": devclass, "friendly_name": "TWC " + str(self.getTwident(msg.twcid)) + " " + msg.key_camelcase, }, }, timeout=self.timeout, headers=headers, ) else: self.requests.post( url, json={ "state": msg.value, "attributes": { "friendly_name": "TWC " + str(self.getTwident(msg.twcid)) + " " + msg.key_camelcase }, }, timeout=self.timeout, headers=headers, ) # Setting elapsing time to now + resendRateInSeconds self.msgQueue[msg.sensor].elapsingTime = ( time.time() + self.resendRateInSeconds ) except self.requests.exceptions.ConnectionError as e: logger.log( logging.INFO4, "Error connecting to HomeAssistant to publish sensor values", ) logger.debug(str(e)) self.settingRetryRate(msg) return False except self.requests.exceptions.ReadTimeout as e: logger.log( logging.INFO4, "Error connecting to HomeAssistant to publish sensor values", ) logger.debug(str(e)) self.settingRetryRate(msg) return False except Exception as e: logger.log( logging.INFO4, "Error during publishing HomeAssistant sensor values" ) logger.debug(str(e)) self.settingRetryRate(msg) return False def settingRetryRate(self, msg): # Setting elapsing time to now + retryRateInSeconds self.msgQueue[msg.sensor].elapsingTime = ( time.time() + self.retryRateInSeconds ) class HASSMessage: elapsingTime = 0 sensor = "" twcid = "" key_underscore = "" key_camelcase = "" value = None unit = "" def __init__( self, elapsingTime, sensor, twcid, key_underscore, key_camelcase, value, unit ): self.elapsingTime = elapsingTime self.sensor = sensor self.twcid = twcid self.key_underscore = key_underscore self.key_camelcase = key_camelcase self.value = value self.unit = unit
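For reference, a stand-alone sketch of the Home Assistant REST call that sendingStatusToHASS issues; the host, port, token, and entity id below are placeholders.

import requests

HASS_URL = "http://homeassistant.local:8123"   # placeholder server address
TOKEN = "YOUR_LONG_LIVED_ACCESS_TOKEN"         # placeholder API key

def publish_sensor(entity_id, value, unit):
    # POST /api/states/<entity_id>, as the module above does
    resp = requests.post(
        f"{HASS_URL}/api/states/{entity_id}",
        json={"state": value, "attributes": {"unit_of_measurement": unit}},
        headers={"Authorization": f"Bearer {TOKEN}", "content-type": "application/json"},
        timeout=2,
    )
    resp.raise_for_status()

# publish_sensor("sensor.twcmanager_a1b2_amps_in_use", 16, "A")  # hypothetical entity id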
33.383886
104
0.52598
619
7,044
5.92084
0.25525
0.038199
0.037108
0.02292
0.265757
0.25266
0.25266
0.233561
0.209004
0.135061
0
0.009397
0.380608
7,044
210
105
33.542857
0.830621
0.039182
0
0.289617
0
0
0.089484
0.003106
0
0
0
0
0
1
0.043716
false
0
0.027322
0.005464
0.240437
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a682a6477a9ae21b7ff09cd8fd4db9201909c6a
809
py
Python
Archive/routes/home_routes.py
taycurran/TwitOff
6e2ee13f83fa86c80988a91b3b41ed0958688c3c
[ "MIT" ]
null
null
null
Archive/routes/home_routes.py
taycurran/TwitOff
6e2ee13f83fa86c80988a91b3b41ed0958688c3c
[ "MIT" ]
3
2021-06-08T21:05:06.000Z
2022-01-13T02:20:50.000Z
Archive/routes/home_routes.py
taycurran/TwitOff
6e2ee13f83fa86c80988a91b3b41ed0958688c3c
[ "MIT" ]
null
null
null
from flask import Blueprint, jsonify, request, render_template

# NOTE: User and DB are referenced below but not imported in this file; they are
# expected to come from the application's models module.

home_routes = Blueprint("home_routes", __name__)


@home_routes.route("/")
def index():
    users = User.query.all()
    return render_template('base.html', title='Home', users=users)


@home_routes.route("/about")
def about():
    return "About Me"


@home_routes.route('/reset')
def reset():
    DB.drop_all()
    DB.create_all()
    return render_template('base.html', title='Reset', users=[])


# # Add config for database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
# # stop tracking modifications on sqlalchemy config
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# # ? app.config["TWITTER_API_CLIENT"] = twitter
# # Have the database know about the app
# DB.init_app(app)
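A sketch of wiring the blueprint into an app factory, based on the configuration hints in the comments above; the models import path is an assumption.

from flask import Flask
# from twitoff.models import DB, User  # assumed location of the DB/User objects used above

def create_app():
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # DB.init_app(app)
    app.register_blueprint(home_routes)
    return app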
25.28125
64
0.678616
102
809
5.176471
0.490196
0.094697
0.085227
0.087121
0.136364
0.136364
0.136364
0
0
0
0
0.001504
0.177998
809
32
65
25.28125
0.792481
0.358467
0
0
0
0
0.116601
0
0
0
0
0
0
1
0.2
false
0
0.066667
0.066667
0.466667
0.133333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6a0fd024fe59393b29eb7bb5c4f5bdd676e60b
8,845
py
Python
intent/scripts/classification/ctn_to_classifier.py
rgeorgi/intent
9920798c126f6d354029f7bb0a345e7cdb649f3a
[ "MIT" ]
3
2016-08-05T01:11:57.000Z
2017-08-26T15:35:51.000Z
intent/scripts/classification/ctn_to_classifier.py
rgeorgi/intent
9920798c126f6d354029f7bb0a345e7cdb649f3a
[ "MIT" ]
2
2016-03-01T22:41:24.000Z
2016-09-14T18:39:25.000Z
intent/scripts/classification/ctn_to_classifier.py
rgeorgi/intent
9920798c126f6d354029f7bb0a345e7cdb649f3a
[ "MIT" ]
null
null
null
from argparse import ArgumentParser from collections import defaultdict import glob import os import pickle from random import shuffle, seed import sys from tempfile import mkdtemp import shutil import logging root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) CTN_LOG = logging.getLogger('CTN_CLASS') CTN_LOG.setLevel(logging.DEBUG) logging.basicConfig() from intent.igt.metadata import set_intent_method, get_intent_method from intent.interfaces.stanford_tagger import StanfordPOSTagger from intent.pos.TagMap import TagMap from intent.utils.env import tagger_model, proj_root from xigt.codecs import xigtxml from xigt.consts import ALIGNMENT from intent.eval.pos_eval import poseval from intent.igt.consts import GLOSS_WORD_ID, POS_TIER_TYPE, LANG_WORD_ID, GLOSS_WORD_TYPE, POS_TIER_ID, \ INTENT_TOKEN_TYPE, INTENT_POS_PROJ, LANG_WORD_TYPE, TRANS_WORD_TYPE, TRANS_WORD_ID, MANUAL_POS, INTENT_POS_CLASS from intent.igt.rgxigt import RGCorpus, strip_pos, RGIgt, RGTokenTier, RGTier, gen_tier_id, RGToken, \ ProjectionTransGlossException, word_align from intent.interfaces.mallet_maxent import MalletMaxent from intent.scripts.classification.xigt_to_classifier import instances_to_classifier from intent.utils.token import POSToken, GoldTagPOSToken from intent.igt.igtutils import rgp __author__ = 'rgeorgi' """ The purpose of this module is to evaluate the POS-line classifiers trained on """ def eval_classifier(c, inst_list, context_feats=False, posdict=None): """ :param c: The classifier :param inst_list: A list of Igt instances to test against. Must already have POS tags. """ gold_sents = [] eval_sents = [] to_dump = RGCorpus() for inst in inst_list: to_tag = inst.copy() strip_pos(to_tag) # Do the classification. to_tag.classify_gloss_pos(c, lowercase=True, feat_next_gram=context_feats, feat_prev_gram=context_feats, posdict=posdict) to_dump.append(to_tag) # Fix the tags... # fix_ctn_gloss_line(to_tag, tag_method=INTENT_POS_CLASS) # Now, retrieve eval/gold. eval_tags = [v.value() for v in to_tag.get_pos_tags(GLOSS_WORD_ID, tag_method=INTENT_POS_CLASS)] gold_tags = [v.value() for v in inst.get_pos_tags(GLOSS_WORD_ID, tag_method=MANUAL_POS)] tag_tokens = [POSToken('a', label=l) for l in eval_tags] gold_tokens= [POSToken('a', label=l) for l in gold_tags] if not len(tag_tokens) == len(gold_tokens): print("LENGTH OF SEQUENCE IS MISMATCHED") continue gold_sents.append(gold_tokens) eval_sents.append(tag_tokens) xigtxml.dump(open('./enriched_ctn_dev.xml', 'w'), to_dump) return poseval(eval_sents, gold_sents, details=True,csv=True, matrix=True) def eval_proj(xc): prj_sents = [] sup_sents = [] for inst in xc: fix_ctn_gloss_line(inst, tag_method=INTENT_POS_PROJ) # Do the projection comparison sup = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=MANUAL_POS) prj = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=INTENT_POS_PROJ) sup_tags = [] prj_tags = [] for s in sup: sup_tags.append(POSToken(s.value(), label=s.value())) # If the same tag occurs in the projections... if not prj: prj_tags.append(POSToken('UNALIGNED', label='UNALIGNED')) continue proj_tag = prj.find(alignment=s.attributes[ALIGNMENT]) if proj_tag: prj_tags.append(POSToken(proj_tag.value(), label=proj_tag.value())) else: prj_tags.append(POSToken('UNALIGNED', label='UNALIGNED')) sup_sents.append(sup_tags) prj_sents.append(prj_tags) poseval(prj_sents, sup_sents, details=True) def fix_ctn_gloss_line(inst, tag_method=None): """ Given a CTN gloss line, do some specific fixes to attempt to fix the CTN tag mapping. 
:param inst: :type inst:RGIgt """ gpos_tier = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=tag_method) # Get the gloss words for gw in inst.gloss: new_tag = None if gw.value().lower() in ['foc','top','seq','add','emph','cit','rep']: new_tag = 'PRT' elif gw.value().lower() in ['but','and','or']: new_tag = 'CONJ' elif 'dem' in gw.value().lower(): new_tag = 'PRON' elif gw.value().lower() in ['for','in']: new_tag = 'ADP' elif gw.value().lower() in ['the']: new_tag = 'DET' if new_tag: gpos = gpos_tier.find(alignment=gw.id) if not gpos: gpt = RGToken(id=gpos_tier.askItemId(), alignment=gw.id, text=new_tag) gpos_tier.add(gpt) else: gpos.text = new_tag if __name__ == '__main__': ctn_train = './data/xml-files/ctn/ctn_train.xml' ctn_dev = './data/xml-files/ctn/ctn_dev.xml' ctn_dev_processed = './data/xml-files/ctn/ctn_dev_processed.xml' ctn_train_processed = './data/xml-files/ctn/ctn_train_processed.xml' posdict = pickle.load(open('./data/dictionaries/CTN.dict', 'rb')) # print("Loading CTN Dev Corpus...", end=" ", flush=True) # dev_xc = RGCorpus.load(ctn_dev) # print("Done.") # # print("Loading CTN Train corpus...", end=" ", flush=True) # train_xc = RGCorpus.load(ctn_train) # print("Done.") print("Initializing tagger...", end=" ", flush=True) tagger = StanfordPOSTagger(tagger_model) print("Done.") # ============================================================================= # 1) Start by projecting the language line to the gloss line in the dev set, # remapping it from the CTN tagset to the universal tagset along the way. # ============================================================================= # # print("Processing DEV corpus...", end=' ', flush=True) # for inst in dev_xc: # word_align(inst.gloss, inst.lang) # inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt') # fix_ctn_gloss_line(inst, tag_method=MANUAL_POS) # inst.tag_trans_pos(tagger) # inst.heur_align() # Align trans/gloss lines heuristically # inst.project_trans_to_gloss() # Now, project heuristically. # print('done.') # # xigtxml.dump(open(ctn_dev_processed, 'w', encoding='utf-8'), dev_xc) # # # print("Processing TRAIN Corpus...", end=' ', flush=True) # # Get the language line words projected onto the gloss... # for inst in train_xc: # word_align(inst.gloss, inst.lang) # inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt') # inst.tag_trans_pos(tagger) # inst.heur_align() # inst.project_trans_to_gloss() # fix_ctn_gloss_line(inst, tag_method=INTENT_POS_PROJ) # # print("Done.") # # xigtxml.dump(open(ctn_train_processed, 'w', encoding='utf-8'), train_xc) # sys.exit() print("Loading Processed CTN Train corpus...", end=" ", flush=True) train_xc = RGCorpus.load(ctn_train_processed) print("Done.") print("Loading Processed CTN Dev corpus...", end=" ", flush=True) dev_xc = RGCorpus.load(ctn_dev_processed) print("Done.") # # # ============================================================================= # # 2) Train a classifier based on the projected gloss line. 
# # ============================================================================= # index_list = [35,70,106,141,284,569,854,1139,1424,1708,1993,7120] for train_stop_index in index_list: train_instances = list(train_xc)[0:train_stop_index] print('* '*50) tokens = 0 for inst in train_instances: tokens += len(inst.gloss) print("Now training with {} tokens, {} instances.".format(tokens, train_stop_index)) print("Training Classifier...", end=" ", flush=True) c = instances_to_classifier(train_instances, './ctn-train.class', tag_method=MANUAL_POS, posdict=posdict, context_feats=True, feat_path='./ctn-train_feats.txt') print("Done.") # c = MalletMaxent('/Users/rgeorgi/Documents/code/dissertation/gc.classifier') # c = MalletMaxent('./ctn_class.class.classifier') print("Evaluating classifier...", end=" ", flush=True) eval_classifier(c, dev_xc, posdict=posdict, context_feats=True) print("Done.") # eval_proj(dev_xc)
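The evaluation above ultimately compares per-token predicted labels against gold labels; a minimal, dependency-free sketch of that comparison (poseval itself reports a much more detailed breakdown):

def tag_accuracy(eval_sents, gold_sents):
    # eval_sents / gold_sents: parallel lists of equal-length tag sequences
    correct = total = 0
    for eval_tags, gold_tags in zip(eval_sents, gold_sents):
        for predicted, gold in zip(eval_tags, gold_tags):
            correct += int(predicted == gold)
            total += 1
    return correct / total if total else 0.0

# tag_accuracy([["NOUN", "VERB"]], [["NOUN", "ADP"]])  # -> 0.5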
33.25188
116
0.614019
1,131
8,845
4.565871
0.232538
0.020914
0.020914
0.020914
0.267622
0.2134
0.17622
0.148335
0.12471
0.117351
0
0.006994
0.240249
8,845
265
117
33.377358
0.761458
0.261617
0
0.084615
0
0
0.096026
0.035162
0
0
0
0
0
1
0.023077
false
0
0.176923
0
0.207692
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6abcccf806b40379eafeffa1d5d6385d6c8a7c
1,358
py
Python
watchdog/back-end/v0.3.0/watchdog/app/resource/video.py
Havana3351/Low-cost-remote-monitor
9f86a62b8515c0f9fddda31f25548680f0ad8e2f
[ "MIT" ]
18
2021-12-03T13:18:07.000Z
2022-03-30T20:20:17.000Z
watchdog/back-end/v1.0.0/watchdogV1-3/app/resource/video.py
Fairywyt/Low-cost-remote-monitor
263b98d969251d2dbef5fb5e4d42a58075e744fa
[ "MIT" ]
null
null
null
watchdog/back-end/v1.0.0/watchdogV1-3/app/resource/video.py
Fairywyt/Low-cost-remote-monitor
263b98d969251d2dbef5fb5e4d42a58075e744fa
[ "MIT" ]
4
2022-03-22T09:58:00.000Z
2022-03-28T08:57:17.000Z
from flask_restful import Resource
from flask import Response
import os
import cv2

picturecounter = 1  # marker used to avoid accumulating too many records


class Video(Resource):
    # Called when the request method is GET
    def get(self):
        global picturecounter

        # username = (request.get_json())['username']
        # db = pymysql.connect("rm-2ze61i7u6d7a3fwp9yo.mysql.rds.aliyuncs.com", "team", "Aaa5225975", "pidata")
        # cursor = db.cursor()
        #
        # sql = "select rpiname from user where username=\'" + username + "\'"  # a type issue may exist here
        # cursor.execute(sql)
        # row = cursor.fetchone()
        #
        # if not row:
        #     rpiname = None
        # rpiname = str(row[0])

        # Override the looked-up value with a hard-coded name
        rpiname = 'raspberrypi'

        # Get the current frame pointer and assign it
        path = r'/root/video/realtime/%s' % (rpiname)
        picnames = []
        for filenames in os.walk(path):
            picnames = filenames
        print(picnames)
        pointer = int(((picnames[2])[0].split('.'))[0])
        picturecounter = pointer

        picpath = r'/root/video/realtime/%s/%s.jpg' % (rpiname, picturecounter)
        image = cv2.imread(picpath)
        bs = cv2.imencode(".jpg", image)[1].tobytes()

        picturecounter += 1
        if picturecounter > 5:
            picturecounter = 1

        return Response(bs, mimetype='image/jpeg')

    def post(self):
        print("post")
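A sketch of registering the resource with Flask-RESTful so that GET /video serves the latest frame; the import path is an assumption.

from flask import Flask
from flask_restful import Api
# from app.resource.video import Video  # assumed import path for the class above

app = Flask(__name__)
api = Api(app)
api.add_resource(Video, "/video")  # GET /video returns the current JPEG frame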
26.115385
111
0.563328
140
1,358
5.45
0.564286
0.058978
0.026212
0.047182
0.049803
0
0
0
0
0
0
0.028571
0.304124
1,358
52
112
26.115385
0.778836
0.279087
0
0.08
0
0
0.086189
0.055036
0
0
0
0
0
1
0.08
false
0
0.16
0
0.32
0.08
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6b4ad6f031ba8193614f726faf3a710def3c48
22,385
py
Python
codes/ambfix.py
valgur/LEOGPS
f289f279ef55980a0e3fd82b3b3686e41c474a2e
[ "MIT" ]
null
null
null
codes/ambfix.py
valgur/LEOGPS
f289f279ef55980a0e3fd82b3b3686e41c474a2e
[ "MIT" ]
null
null
null
codes/ambfix.py
valgur/LEOGPS
f289f279ef55980a0e3fd82b3b3686e41c474a2e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 ''' ############################################################################### ############################################################################### ## ## ## _ ___ ___ ___ ___ ___ ## ## | | | __ / \ / __| _ | __| ## ## | |__| __ ( ) | (_ | _|__ \ ## ## |____|___ \___/ \___|_| \___/ ## ## v 1.0 (Stable) ## ## ## ## FILE DESCRIPTION: ## ## ## ## This is the classical LAMBDA method that was originally authored by ## ## Teunissen, Jonge, and Tiberius (1993). The code was later written in ## ## MATLAB by Dr Sandra Verhagen and Dr Bofeng Li. It takes in a vector of ## ## float ambiguities to the integer least-squares problem, and covariance ## ## of the float ambiguities. It then runs the LAMBDA's ILS search-&-shrink ## ## and spits out the ambiguity integers. The other 5 methods in original ## ## LAMBDA MATLAB code are not supported here (feel free to edit the code ## ## and implement it youself!). The default ncands = 2, as per original code. ## ## All support functions from the original MATLAB code (decorrel, ldldecom) ## ## have been nested within the main function as sub functions. ## ## ## ## INPUTS: ## ## ## ## - ahat : numpy array of float ambiguities ## ## - Qahat : numpy covariance matrix for float ambiguities ## ## - ncands : number of candidates (optional parameter, default = 2) ## ## ## ## OUTPUT: ## ## ## ## - afixed : Array of size (n x ncands) with the estimated integer ## ## candidates, sorted according to the corresponding squared ## ## norms, best candidate first. ## ## - sqnorm : Distance between integer candidate and float ambiguity ## ## vectors in the metric of the variance-covariance matrix. ## ## ## ## REMARKS: ## ## ## ## Besides above changes, mostly syntax changes to this Python version: ## ## - Everything is identical EXCEPT MATLAB is ones-based indexing. ## ## - Python is zeros-based indexing, and range function does not ## ## include the upper limit index. Thus, only indices have changed. ## ## - Example in MATLAB: for i = 1:5 => {1,2,3,4,5} ## ## - Equivalently in Python: for i in range(0,5) => {0,1,2,3,4} ## ## - Indices are thus updated accordingly. ## ## ## ## DEVELOPER: Professor Peter Teunissen (TU Delft) ## ## ORIGINAL AUTHOR: Sandra Verhagen and Bofeng Li (TU Delft) ## ## AUTHOR MODIFIED: 26-07-2019, by Samuel Y.W. Low, with permissions. ## ## ## ############################################################################### ############################################################################### ''' import numpy as np def LAMBDA( ahat, Qahat, ncands = 2 ): ########################################################################### ########################################################################### # [afixed, sqnorm] = LAMBDA( ahat, Qahat, ncands ) # # This is the main routine of the LAMBDA software package. By default the # ILS method will be used for integer estimation based on the provided # float ambiguity vector ahat and associated variance-covariance matrix # Qahat. In this Pythonic version (modified by Samuel Low, 2019), only # the ILS method is implemented. For other techniques: integer rounding, # bootstrapping or Partial Ambiguity Resolution (PAR), the user is free # to modify this code and adapt it to their own needs. # # NOTE 1: LAMBDA always first applies a decorrelation before the integer # estimation (for ILS this is required to guarantee an efficient search, # for rounding and bootstrapping it is required in order to get higher # success rates). 
# # INPUTS: # # ahat: Float ambiguities (must be a column!) # Qahat: Variance/covariance matrix of ambiguities # ncands: number of candidates (optional parameter, default = 2) # # OUTPUTS: # # afixed: Array of size (n x ncands) with the estimated integer # candidates, sorted according to the corresponding squared # norms, best candidate first. # sqnorm: Distance between integer candidate and float ambiguity vectors # in the metric of the variance-covariance matrix Qahat. # Only available for ILS. # # ------------------------------------------------------------------------- # Release date : 1-SEPT-2012 # Authors : Bofeng LI and Sandra VERHAGEN # # GNSS Research Centre, Curtin University # Mathematical Geodesy and Positioning, Delft University of Technology # ------------------------------------------------------------------------- # # REFERENCES: # 1. LAMBDA Software Package: Matlab implementation, Version 3.0. # Documentation provided with this software package. # 2. Teunissen P (1993) Least-squares estimation of the integer GPS # ambiguities. In: Invited lecture, section IV theory and methodology, # IAG General Meeting, Beijing, China # 3. Teunissen P (1995) The least-squares ambiguity decorrelation # adjustment: a method for fast GPS ambiguity estitmation. J Geod # 70:651-7 # 4. De Jonge P, Tiberius C (1996) The LAMBDA method of intger ambiguity # estimation:implementation aspects. # 5. Chang X ,Yang X, Zhou T (2005) MLAMBDA: a modified LAMBDA method for # integer least-squares estimation ########################################################################### ########################################################################### ''' A function for obtaining the decimals only from float arrays ''' def floatrem( fltarray ): # This function is NECESSARY because of the differences between: # MATLAB's rem function # (computes the true mathematical remainder) # And Python's modulo % operator # (computes remainder complementary to the floor_divide function) fltarray = np.array(fltarray) fltarray = fltarray + 0.000001 intarray = fltarray.astype(int) decarray = fltarray - intarray return decarray, intarray ########################################################################### ########################################################################### ''' A function to perform LtDL decomposition of the covariance matrix ''' def ldldecom( Qahat1 ): # This routine finds the LtDL decomposition of a given variance or # covariance matrix. # # Input arguments: # Qahat: Symmetric n by n matrix to be factored # # Output arguments: # L: n by n factor matrix (strict lower triangular) # D: Diagonal n-vector # ------------------------------------------------------------------ # File.....: ldldecom # Date.....: 19-MAY-1999 # Author...: Peter Joosten # Mathematical Geodesy and Positioning # Delft University of Technology # ------------------------------------------------------------------ Qahat2 = Qahat1.copy() # If we do not use copy, we will overwrite the original Qahat... # ... even the one outside the function! This doesn't occur in MATLAB. 
n = len(Qahat2) D = np.zeros((n)) L = np.zeros((n,n)) for i in range(n-1,-1,-1): D[i] = Qahat2[i][i] L[i,0:i+1] = Qahat2[i,0:i+1] / ((Qahat2[i][i])**0.5) for j in range(0,i): Qahat2[j,0:j+1] = Qahat2[j,0:j+1] - L[i,0:j+1]*L[i][j] L[i,0:i+1] = L[i,0:i+1] / L[i][i] return L,D ########################################################################### ########################################################################### ''' Decorrelation function for LAMBDA ''' def decorrel( ahat, Qahat ): # function [Qzhat,Z,L,D,zhat,iZt] = decorrel (Qahat,ahat) # DECORREL: Decorrelate a (co)variance matrix of ambiguities # # [Qzhat,Z,L,D,zhat] = decorrel (Qahat,ahat) # # This routine creates a decorrelated Q-matrix, by finding the # Z-matrix and performing the corresponding transformation. # # The method is described in: # The routine is based on Fortran routines written by Paul de Jonge # and on Matlab-routines written by Kai Borre. # The resulting Z-matrix can be used as follows: # zhat = Zt * ahat; \hat(z) = Z' * \hat(a); # Q_\hat(z) = Z' * Q_\hat(a) * Z # # Input arguments: # Qahat: Variance-covariance matrix of ambiguities (original) # ahat: Original ambiguities (optional) # # Output arguments: # Qzhat: Variance-covariance matrix of decorrelated ambiguities # Z: Z-transformation matrix # L: L matrix (from LtDL-decomposition of Qzhat) # D: D matrix (from LtDL-decomposition of Qzhat) # zhat: Transformed ambiguities (optional) # iZt: inv(Z')-transformation matrix # # ------------------------------------------------------------------ # Function.: decorrel # Date.....: 19-MAY-1999 / modified 12-APRIL-2012 # Author...: Peter Joosten / Sandra Verhagen # Mathematical Geodesy and Positioning # Delft University of Technology # Modified.: Samuel Low, July 2019, DSO National Laboratories # ------------------------------------------------------------------ # Initialisations n = len(Qahat) iZt = np.identity(n) i1 = n - 1 sw = True # LtDL decomposition L, D = ldldecom(Qahat) while sw == 1: i = n # Loop for column from n to 1 sw = 0 while sw == 0 and i > 1: i = i - 1 # The i-th column if i <= i1: for j in range(i,n): # We have to do some manual coding here, as python's # rounding for .5's are different from MATLAB's mu = L[j,i-1] # Get the float mu mu_dec = mu%1 # Get the decimal float of mu if mu_dec == 0.5: mu += 0.01 # Just to make it round up properly. mu = round(mu) if mu != 0.0: L[j:n,i-1] = L[j:n,i-1] - mu * L[j:n,j] iZt[:,j] = iZt[:,j] + mu * iZt[:,i-1] delta = D[i-1] + (L[i,i-1]**2) * D[i] if delta < D[i]: lam = D[i] * L[i,i-1] / delta eta = D[i-1] / delta D[i-1] = eta * D[i] D[i] = delta mult1 = np.array([-1*L[i,i-1], 1]) mult2 = np.array([eta,lam]) mult3 = np.stack((mult1,mult2)) L[i-1:i+1,0:i-1] = np.matmul(mult3,L[i-1:i+1,0:i-1]) L[i,i-1] = lam # Flip rows i and i+1 L[i+1:n,i-1:i+1] = np.flip(L[i+1:n,i-1:i+1], axis=0) iZt[:,i-1:i+1] = np.flip(iZt[:,i-1:i+1], axis=0) i1 = i sw = 1 iZt = iZt + 0.000001 # Resolves Python 3's rounding definition Z = np.round(np.linalg.inv(iZt.transpose())) Qzhat = np.matmul( Qahat, Z ) Qzhat = np.matmul( Z.transpose(), Qzhat ) zhat = np.matmul(Z.transpose(),ahat) iZt = np.round(iZt) return Qzhat, Z, L, D, zhat, iZt ########################################################################### ########################################################################### def ssearch( ahat, L, D, ncands): #------------------------------------------------------------------| # # Integer ambiguity vector search via search-and-shrink technique. 
# # INPUTS: # # ahat : Float ambiguities (should be decorrelated for # computational efficiency) # L,D : LtDL-decomposition of the variance-covariance matrix # of the float ambiguities ahat # ncands: Number of requested candidates # # OUTPUTS: # # afixed: estimated integers (n, x, ncands) # sqnorm: corresponding squared norms (n-vector, ascending order) # #------------------------------------------------------------------| # Date : 02-SEPT-2010 | # Author : Bofeng LI | # GNSS Research Center, Department of Spatial Sciences | # Curtin University of Technology | # E-mail : [email protected] | #------------------------------------------------------------------| # First, check that float ambiguity and D have same length if len(ahat) != len(D): print('Error! Float ambiguity vector must be a column vector!') print('It must also have the same dimension as D') return None # Initialising outputs n = len(ahat) afixed = np.zeros((n, ncands)) sqnorm = np.zeros(ncands) # Initializing the variables for searching Chi2 = 1.0e+18 # Start search with an infinite chi-square dist = np.zeros(n) # MATLAB distance function endsearch = False # Search trigger count = 0 # Count the number of candidates acond = np.zeros(n) acond[n-1] = ahat[n-1] zcond = np.zeros(n) zcond[n-1] = np.round(acond[n-1]+0.000001) left = acond[n-1] - zcond[n-1] step = np.zeros(n) step[n-1] = np.sign(left) if step[n-1] == 0: step[n-1] = 1 # Give a positive step. imax = ncands - 1 # Initially, the maximum F(z) is at ncands S = np.zeros((n,n)) # Used to compute conditional ambiguities k = n # Now we start the main search loop. while endsearch == False: newdist = dist[k-1] + (left**2) / D[k-1] if newdist < Chi2: if k != 1: # Case 1: move down k -= 1 dist[k-1] = newdist S[k-1,0:k] = S[k,0:k] + (zcond[k] - acond[k])*L[k,0:k] acond[k-1] = ahat[k-1] + S[k-1,k-1] zcond[k-1] = np.round(acond[k-1]+0.000001) left = acond[k-1] - zcond[k-1] step[k-1] = np.sign(left) if step[k-1] == 0: # Very rarely would this happen... step[k-1] = 1 # ... but just in case, you know. else: # Case 2: store the found candidate and try the next. if count < (ncands - 1): # Store the 1st ncands-1 initial points as candidates count += 1 afixed[:,count-1] = zcond[0:n]; sqnorm[count-1] = newdist # Store F(zcond) else: afixed[:,imax] = zcond[0:n] sqnorm[imax] = newdist Chi2 = max(sqnorm) imax = np.argmax(sqnorm) # No need to add '-1' to imax zcond[0] = zcond[0] + step[0] left = acond[0] - zcond[0] step[0] = -1*step[0] - np.sign(step[0]) else: # Case 3: exit or move up if k == n: endsearch = True else: k += 1 # Move up zcond[k-1] = zcond[k-1] + step[k-1] left = acond[k-1] - zcond[k-1] step[k-1] = -1*step[k-1] - np.sign(step[k-1]) order = np.argsort(sqnorm) # Get an array of INDICES for a sort. sqnormf = np.sort(sqnorm) # Get an array of ACTUAL SORTS for sqnorm. afixedf = np.copy(afixed) for k in range(0,len(order)): afixedf[:,k] = afixed[:,order[k]] return afixedf, sqnormf ########################################################################### ########################################################################### ''' Initialisation and some initial sanity checks... ''' # Initialise all output variables sqnorm = np.array([]) # Test inputs: Is the Q-matrix symmetric? if np.array_equal(Qahat,Qahat.transpose()) == False: print('Variance-covariance matrix is not symmetric!') return None # Test inputs: Is the Q-matrix positive-definite? 
if np.sum(np.linalg.eig(Qahat)[0] > 0.0) != len(Qahat): print('Variance-covariance matrix is not positive definite!') return None # Test inputs: Does Q-matrix and amb vector have identical dimensions? if len(ahat) != len(Qahat): print('Variance-covariance matrix and vector of ambiguities...') print('... do not have identical dimensions!') return None ########################################################################### ########################################################################### ''' Begin least-squares ambiguity decorrelation adjustment! ''' # Remove integer numbers from float solution, so that all values are # between -1 and 1 (for computational convenience only) ahat, incr = floatrem( ahat ) # Compute Z matrix based on the decomposition Q=L^T*D*L; Qzhat, Z, L, D, zhat, iZt = decorrel( ahat, Qahat ) # Integer ambiguity vector search via search-and-shrink zfixedff, sqnormff = ssearch( zhat, L, D, ncands ) # Perform the back-transformation and add the increments afixed = np.matmul(iZt,zfixedff) repmat = np.repeat(np.array([incr]),ncands,axis=0) repmat = repmat.transpose() afixed = afixed + repmat afixed = afixed.transpose() ########################################################################### ########################################################################### ''' Returns best amb-fix, second best amb-fix, and the square norm ''' return afixed, sqnorm ########################################################################### ###########################################################################
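A usage sketch for the LAMBDA function above; the float ambiguities and covariance matrix are illustrative values (symmetric and positive definite), and the import path is an assumption.

import numpy as np
# from ambfix import LAMBDA  # assumed import of the function defined above

ahat = np.array([2.3, -1.7, 0.6])             # illustrative float ambiguities
Qahat = np.array([[4.0, 1.0, 0.5],
                  [1.0, 3.0, 0.2],
                  [0.5, 0.2, 2.0]])           # symmetric, positive definite

afixed, sqnorm = LAMBDA(ahat, Qahat, ncands=2)
print(afixed)  # one candidate integer vector per row, best candidate first
# Note: as written, the function returns the initial empty sqnorm array rather
# than the squared norms computed inside ssearch.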
42.638095
104
0.3979
2,065
22,385
4.287167
0.245036
0.007907
0.027109
0.002711
0.192025
0.161301
0.111714
0.105162
0.069129
0.055574
0
0.023238
0.421354
22,385
524
105
42.719466
0.660233
0.510476
0
0.068027
0
0
0.031044
0
0
0
0
0
0
1
0.034014
false
0
0.006803
0
0.102041
0.040816
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6d2f3733dce67a2fafd219a662c5c458e102f9
1,774
py
Python
XORCipher/XOREncrypt.py
KarthikGandrala/DataEncryption
6ed4dffead345bc9f7010ac2ea9afbff958c85af
[ "MIT" ]
1
2021-07-12T06:05:45.000Z
2021-07-12T06:05:45.000Z
XORCipher/XOREncrypt.py
KarthikGandrala/Encrypt-Your-Data
6ed4dffead345bc9f7010ac2ea9afbff958c85af
[ "MIT" ]
null
null
null
XORCipher/XOREncrypt.py
KarthikGandrala/Encrypt-Your-Data
6ed4dffead345bc9f7010ac2ea9afbff958c85af
[ "MIT" ]
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Function to encrypt a message with a repeating XOR key
def encrypt(msg, key):
    # Empty output string and key index
    hexadecimal = ''
    iteration = 0
    # XOR each character of the message with the current key character
    for i in range(len(msg)):
        temp = ord(msg[i]) ^ ord(key[iteration])
        # zfill pads a single-letter hex value with 0 to make a two-character pair
        hexadecimal += hex(temp)[2:].zfill(2)
        # Advance the key index, repeating the key once it is exhausted
        iteration += 1
        if iteration >= len(key):
            iteration = 0
    # Return the final hex string
    return hexadecimal


def decrypt(msg, key):
    # String to collect the characters decoded from hex
    hex_to_uni = ''
    # Walk the message two hex characters (one byte) at a time
    for i in range(0, len(msg), 2):
        # Decode each individual byte from hex
        hex_to_uni += bytes.fromhex(msg[i:i + 2]).decode('utf-8')

    decryp_text = ''
    iteration = 0
    # XOR each decoded character with the current key character
    for i in range(len(hex_to_uni)):
        temp = ord(hex_to_uni[i]) ^ ord(key[iteration])
        decryp_text += chr(temp)
        # Advance the key index, repeating the key once it is exhausted
        iteration += 1
        if iteration >= len(key):
            iteration = 0
    # Finally return the decrypted text string
    return decryp_text
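A round-trip example using the two functions above; any ASCII message and key behave the same way.

ciphertext = encrypt("attack at dawn", "key")
print(ciphertext)                   # hex string, two characters per byte
print(decrypt(ciphertext, "key"))   # -> "attack at dawn"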
23.653333
79
0.558061
240
1,774
4.079167
0.35
0.030644
0.040858
0.033708
0.332993
0.304392
0.304392
0.304392
0.304392
0.304392
0
0.01421
0.365276
1,774
74
80
23.972973
0.85524
0.424464
0
0.347826
0
0
0.00498
0
0
0
0
0
0
1
0.086957
false
0
0
0
0.173913
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6e68db8c94071ad8a29d0149ef1ef93e54c4c1
634
py
Python
02-Use-functions/21-Opening_a_file/secret_message.py
francisrod01/udacity_python_foundations
2a384cf35ce7eff547c88097cdc45cc4e8fc6041
[ "MIT" ]
null
null
null
02-Use-functions/21-Opening_a_file/secret_message.py
francisrod01/udacity_python_foundations
2a384cf35ce7eff547c88097cdc45cc4e8fc6041
[ "MIT" ]
null
null
null
02-Use-functions/21-Opening_a_file/secret_message.py
francisrod01/udacity_python_foundations
2a384cf35ce7eff547c88097cdc45cc4e8fc6041
[ "MIT" ]
null
null
null
#!/usr/bin/python3

import os
import random


def rename_files(path):
    file_list = os.listdir(path)
    print(file_list)

    for file_name in file_list:
        # To remove numbers from the filename instead, something like:
        # new_file_name = file_name.translate(str.maketrans('', '', "0123456789"))

        # Add random numbers to beginning of filename.
        new_file_name = str(random.randint(1, 99)) + file_name
        print("Renaming " + file_name + " to " + new_file_name)
        os.rename(os.path.join(path, file_name), os.path.join(path, new_file_name))


print("# Python program - Adding random numbers to beginning of filename.")
rename_files("./prank")
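A small demonstration of rename_files against a throwaway directory, so the ./prank folder is not required; the file names are illustrative.

import os
import tempfile

demo_dir = tempfile.mkdtemp()
for name in ("1cat.jpg", "2dog.jpg"):
    open(os.path.join(demo_dir, name), "w").close()

rename_files(demo_dir)  # prefixes each file with a random number between 1 and 99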
25.36
83
0.679811
90
634
4.588889
0.433333
0.174334
0.106538
0.09201
0.164649
0.164649
0
0
0
0
0
0.027944
0.209779
634
24
84
26.416667
0.796407
0.233438
0
0
0
0
0.178423
0
0
0
0
0
0
1
0.090909
false
0
0.181818
0
0.272727
0.272727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a6eef44c90456b4e29cb5273e1126093472758f
101,780
py
Python
xarray/core/variable.py
timgates42/xarray
bf0fe2caca1d2ebc4f1298f019758baa12f68b94
[ "Apache-2.0" ]
null
null
null
xarray/core/variable.py
timgates42/xarray
bf0fe2caca1d2ebc4f1298f019758baa12f68b94
[ "Apache-2.0" ]
null
null
null
xarray/core/variable.py
timgates42/xarray
bf0fe2caca1d2ebc4f1298f019758baa12f68b94
[ "Apache-2.0" ]
1
2021-07-13T07:06:10.000Z
2021-07-13T07:06:10.000Z
import copy import functools import itertools import numbers import warnings from collections import defaultdict from datetime import timedelta from distutils.version import LooseVersion from typing import ( Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ) import numpy as np import pandas as pd import xarray as xr # only for Dataset and DataArray from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils from .indexing import ( BasicIndexer, OuterIndexer, PandasIndexAdapter, VectorizedIndexer, as_indexable, ) from .npcompat import IS_NEP18_ACTIVE from .options import _get_keep_attrs from .pycompat import ( cupy_array_type, dask_array_type, integer_types, is_duck_dask_array, ) from .utils import ( OrderedSet, _default, decode_numpy_dict_values, drop_dims_from_indexers, either_dict_or_kwargs, ensure_us_time_resolution, infix_dims, is_duck_array, ) NON_NUMPY_SUPPORTED_ARRAY_TYPES = ( ( indexing.ExplicitlyIndexed, pd.Index, ) + dask_array_type + cupy_array_type ) # https://github.com/python/mypy/issues/224 BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore VariableType = TypeVar("VariableType", bound="Variable") """Type annotation to be used when methods of Variable return self or a copy of self. When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the output as an instance of the subclass. Usage:: class Variable: def f(self: VariableType, ...) -> VariableType: ... """ class MissingDimensionsError(ValueError): """Error class used when we can't safely guess a dimension name.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]": """Convert an object into a Variable. Parameters ---------- obj : object Object to convert into a Variable. - If the object is already a Variable, return a shallow copy. - Otherwise, if the object has 'dims' and 'data' attributes, convert it into a new Variable. - If all else fails, attempt to convert the object into a Variable by unpacking it into the arguments for creating a new Variable. name : str, optional If provided: - `obj` can be a 1D array, which is assumed to label coordinate values along a dimension of this given name. - Variables with name matching one of their dimensions are converted into `IndexVariable` objects. Returns ------- var : Variable The newly created variable. """ from .dataarray import DataArray # TODO: consider extending this method to automatically handle Iris and if isinstance(obj, DataArray): # extract the primary Variable from DataArrays obj = obj.variable if isinstance(obj, Variable): obj = obj.copy(deep=False) elif isinstance(obj, tuple): try: obj = Variable(*obj) except (TypeError, ValueError) as error: # use .format() instead of % because it handles tuples consistently raise error.__class__( "Could not convert tuple of form " "(dims, data[, attrs, encoding]): " "{} to Variable.".format(obj) ) elif utils.is_scalar(obj): obj = Variable([], obj) elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None: obj = Variable(obj.name, obj) elif isinstance(obj, (set, dict)): raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj))) elif name is not None: data = as_compatible_data(obj) if data.ndim != 1: raise MissingDimensionsError( "cannot set variable %r with %r-dimensional data " "without explicit dimension names. Pass a tuple of " "(dims, data) instead." 
% (name, data.ndim) ) obj = Variable(name, data, fastpath=True) else: raise TypeError( "unable to convert object into a variable without an " "explicit list of dimensions: %r" % obj ) if name is not None and name in obj.dims: # convert the Variable into an Index if obj.ndim != 1: raise MissingDimensionsError( "%r has more than 1-dimension and the same name as one of its " "dimensions %r. xarray disallows such variables because they " "conflict with the coordinates used to label " "dimensions." % (name, obj.dims) ) obj = obj.to_index_variable() return obj def _maybe_wrap_data(data): """ Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure they can be indexed properly. NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should all pass through unmodified. """ if isinstance(data, pd.Index): return PandasIndexAdapter(data) return data def _possibly_convert_objects(values): """Convert arrays of datetime.datetime and datetime.timedelta objects into datetime64 and timedelta64, according to the pandas convention. Also used for validating that datetime64 and timedelta64 objects are within the valid date range for ns precision, as pandas will raise an error if they are not. """ return np.asarray(pd.Series(values.ravel())).reshape(values.shape) def as_compatible_data(data, fastpath=False): """Prepare and wrap data to put in a Variable. - If data does not have the necessary attributes, convert it to ndarray. - If data has dtype=datetime64, ensure that it has ns precision. If it's a pandas.Timestamp, convert it to datetime64. - If data is already a pandas or xarray object (other than an Index), just use the values. Finally, wrap it up with an adapter if necessary. """ if fastpath and getattr(data, "ndim", 0) > 0: # can't use fastpath (yet) for scalars return _maybe_wrap_data(data) if isinstance(data, Variable): return data.data if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES): return _maybe_wrap_data(data) if isinstance(data, tuple): data = utils.to_0d_object_array(data) if isinstance(data, pd.Timestamp): # TODO: convert, handle datetime objects, too data = np.datetime64(data.value, "ns") if isinstance(data, timedelta): data = np.timedelta64(getattr(data, "value", data), "ns") # we don't want nested self-described arrays data = getattr(data, "values", data) if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) if mask.any(): dtype, fill_value = dtypes.maybe_promote(data.dtype) data = np.asarray(data, dtype=dtype) data[mask] = fill_value else: data = np.asarray(data) if not isinstance(data, np.ndarray): if hasattr(data, "__array_function__"): if IS_NEP18_ACTIVE: return data else: raise TypeError( "Got an NumPy-like array type providing the " "__array_function__ protocol but NEP18 is not enabled. " "Check that numpy >= v1.16 and that the environment " 'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to ' '"1"' ) # validate whether the data is valid data types. data = np.asarray(data) if isinstance(data, np.ndarray): if data.dtype.kind == "O": data = _possibly_convert_objects(data) elif data.dtype.kind == "M": data = _possibly_convert_objects(data) elif data.dtype.kind == "m": data = _possibly_convert_objects(data) return _maybe_wrap_data(data) def _as_array_or_item(data): """Return the given values as a numpy array, or as an individual item if it's a 0d datetime64 or timedelta64 array. Importantly, this function does not copy data if it is already an ndarray - otherwise, it will not be possible to update Variable values in place. 
This function mostly exists because 0-dimensional ndarrays with dtype=datetime64 are broken :( https://github.com/numpy/numpy/issues/4337 https://github.com/numpy/numpy/issues/7619 TODO: remove this (replace with np.asarray) once these issues are fixed """ if isinstance(data, cupy_array_type): data = data.get() else: data = np.asarray(data) if data.ndim == 0: if data.dtype.kind == "M": data = np.datetime64(data, "ns") elif data.dtype.kind == "m": data = np.timedelta64(data, "ns") return data class Variable( common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin ): """A netcdf-like variable consisting of dimensions, data and attributes which describe a single Array. A single Variable object is not fully described outside the context of its parent Dataset (if you want such a fully described object, use a DataArray instead). The main functional difference between Variables and numpy arrays is that numerical operations on Variables implement array broadcasting by dimension name. For example, adding an Variable with dimensions `('time',)` to another Variable with dimensions `('space',)` results in a new Variable with dimensions `('time', 'space')`. Furthermore, numpy reduce operations like ``mean`` or ``sum`` are overwritten to take a "dimension" argument instead of an "axis". Variables are light-weight objects used as the building block for datasets. They are more primitive objects, so operations with them provide marginally higher performance than using DataArrays. However, manipulating data in the form of a Dataset or DataArray should almost always be preferred, because they can use more complete metadata in context of coordinate labels. """ __slots__ = ("_dims", "_data", "_attrs", "_encoding") def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False): """ Parameters ---------- dims : str or sequence of str Name(s) of the the data dimension(s). Must be either a string (only for 1D data) or a sequence of strings with length equal to the number of dimensions. data : array_like Data array which supports numpy-like data access. attrs : dict_like or None, optional Attributes to assign to the new variable. If None (default), an empty attribute dictionary is initialized. encoding : dict_like or None, optional Dictionary specifying how to encode this array's data into a serialized format like netCDF4. Currently used keys (for netCDF) include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'. Well-behaved code to serialize a Variable should ignore unrecognized encoding items. """ self._data = as_compatible_data(data, fastpath=fastpath) self._dims = self._parse_dimensions(dims) self._attrs = None self._encoding = None if attrs is not None: self.attrs = attrs if encoding is not None: self.encoding = encoding @property def dtype(self): return self._data.dtype @property def shape(self): return self._data.shape @property def nbytes(self): return self.size * self.dtype.itemsize @property def _in_memory(self): return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or ( isinstance(self._data, indexing.MemoryCachedArray) and isinstance(self._data.array, indexing.NumpyIndexingAdapter) ) @property def data(self): if is_duck_array(self._data): return self._data else: return self.values @data.setter def data(self, data): data = as_compatible_data(data) if data.shape != self.shape: raise ValueError( f"replacement data must match the Variable's shape. 
" f"replacement data has shape {data.shape}; Variable has shape {self.shape}" ) self._data = data def astype( self: VariableType, dtype, *, order=None, casting=None, subok=None, copy=None, keep_attrs=True, ) -> VariableType: """ Copy of the Variable object, with data cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. ‘C’ means C order, ‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to the order the array elements appear in memory as possible. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to False and the `dtype` requirement is satisfied, the input array is returned instead of a copy. keep_attrs : bool, optional By default, astype keeps attributes. Set to False to remove attributes in the returned object. Returns ------- out : same as object New object with data cast to the specified type. Notes ----- The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed through to the ``astype`` method of the underlying array when a value different than ``None`` is supplied. Make sure to only supply these arguments if the underlying array class supports them. See also -------- numpy.ndarray.astype dask.array.Array.astype sparse.COO.astype """ from .computation import apply_ufunc kwargs = dict(order=order, casting=casting, subok=subok, copy=copy) kwargs = {k: v for k, v in kwargs.items() if v is not None} return apply_ufunc( duck_array_ops.astype, self, dtype, kwargs=kwargs, keep_attrs=keep_attrs, dask="allowed", ) def load(self, **kwargs): """Manually trigger loading of this variable's data from disk or a remote source into memory and return this variable. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. See Also -------- dask.array.compute """ if is_duck_dask_array(self._data): self._data = as_compatible_data(self._data.compute(**kwargs)) elif not is_duck_array(self._data): self._data = np.asarray(self._data) return self def compute(self, **kwargs): """Manually trigger loading of this variable's data from disk or a remote source into memory and return a new variable. The original is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. 
See Also -------- dask.array.compute """ new = self.copy(deep=False) return new.load(**kwargs) def __dask_tokenize__(self): # Use v.data, instead of v._data, in order to cope with the wrappers # around NetCDF and the like from dask.base import normalize_token return normalize_token((type(self), self._dims, self.data, self._attrs)) def __dask_graph__(self): if is_duck_dask_array(self._data): return self._data.__dask_graph__() else: return None def __dask_keys__(self): return self._data.__dask_keys__() def __dask_layers__(self): return self._data.__dask_layers__() @property def __dask_optimize__(self): return self._data.__dask_optimize__ @property def __dask_scheduler__(self): return self._data.__dask_scheduler__ def __dask_postcompute__(self): array_func, array_args = self._data.__dask_postcompute__() return ( self._dask_finalize, (array_func, array_args, self._dims, self._attrs, self._encoding), ) def __dask_postpersist__(self): array_func, array_args = self._data.__dask_postpersist__() return ( self._dask_finalize, (array_func, array_args, self._dims, self._attrs, self._encoding), ) @staticmethod def _dask_finalize(results, array_func, array_args, dims, attrs, encoding): data = array_func(results, *array_args) return Variable(dims, data, attrs=attrs, encoding=encoding) @property def values(self): """The variable's data as a numpy.ndarray""" return _as_array_or_item(self._data) @values.setter def values(self, values): self.data = values def to_base_variable(self): """Return this variable as a base xarray.Variable""" return Variable( self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_variable = utils.alias(to_base_variable, "to_variable") def to_index_variable(self): """Return this variable as an xarray.IndexVariable""" return IndexVariable( self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_coord = utils.alias(to_index_variable, "to_coord") def to_index(self): """Convert this variable to a pandas.Index""" return self.to_index_variable().to_index() def to_dict(self, data=True): """Dictionary representation of variable.""" item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)} if data: item["data"] = ensure_us_time_resolution(self.values).tolist() else: item.update({"dtype": str(self.dtype), "shape": self.shape}) return item @property def dims(self): """Tuple of dimension names with which this variable is associated.""" return self._dims @dims.setter def dims(self, value): self._dims = self._parse_dimensions(value) def _parse_dimensions(self, dims): if isinstance(dims, str): dims = (dims,) dims = tuple(dims) if len(dims) != self.ndim: raise ValueError( "dimensions %s must have the same length as the " "number of data dimensions, ndim=%s" % (dims, self.ndim) ) return dims def _item_key_to_tuple(self, key): if utils.is_dict_like(key): return tuple(key.get(dim, slice(None)) for dim in self.dims) else: return key def _broadcast_indexes(self, key): """Prepare an indexing key for an indexing operation. Parameters ----------- key: int, slice, array-like, dict or tuple of integer, slice and array-like Any valid input for indexing. Returns ------- dims : tuple Dimension of the resultant variable. indexers : IndexingTuple subclass Tuple of integer, array-like, or slices to use when indexing self._data. The type of this argument indicates the type of indexing to perform, either basic, outer or vectorized. new_order : Optional[Sequence[int]] Optional reordering to do on the result of indexing. 
If not None, the first len(new_order) indexing should be moved to these positions. """ key = self._item_key_to_tuple(key) # key is a tuple # key is a tuple of full size key = indexing.expanded_indexer(key, self.ndim) # Convert a scalar Variable to an integer key = tuple( k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key ) # Convert a 0d-array to an integer key = tuple( k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key ) if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key): return self._broadcast_indexes_basic(key) self._validate_indexers(key) # Detect it can be mapped as an outer indexer # If all key is unlabeled, or # key can be mapped as an OuterIndexer. if all(not isinstance(k, Variable) for k in key): return self._broadcast_indexes_outer(key) # If all key is 1-dimensional and there are no duplicate labels, # key can be mapped as an OuterIndexer. dims = [] for k, d in zip(key, self.dims): if isinstance(k, Variable): if len(k.dims) > 1: return self._broadcast_indexes_vectorized(key) dims.append(k.dims[0]) elif not isinstance(k, integer_types): dims.append(d) if len(set(dims)) == len(dims): return self._broadcast_indexes_outer(key) return self._broadcast_indexes_vectorized(key) def _broadcast_indexes_basic(self, key): dims = tuple( dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types) ) return dims, BasicIndexer(key), None def _validate_indexers(self, key): """ Make sanity checks """ for dim, k in zip(self.dims, key): if isinstance(k, BASIC_INDEXING_TYPES): pass else: if not isinstance(k, Variable): k = np.asarray(k) if k.ndim > 1: raise IndexError( "Unlabeled multi-dimensional array cannot be " "used for indexing: {}".format(k) ) if k.dtype.kind == "b": if self.shape[self.get_axis_num(dim)] != len(k): raise IndexError( "Boolean array size {:d} is used to index array " "with shape {:s}.".format(len(k), str(self.shape)) ) if k.ndim > 1: raise IndexError( "{}-dimensional boolean indexing is " "not supported. ".format(k.ndim) ) if getattr(k, "dims", (dim,)) != (dim,): raise IndexError( "Boolean indexer should be unlabeled or on the " "same dimension to the indexed array. Indexer is " "on {:s} but the target dimension is {:s}.".format( str(k.dims), dim ) ) def _broadcast_indexes_outer(self, key): dims = tuple( k.dims[0] if isinstance(k, Variable) else dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types) ) new_key = [] for k in key: if isinstance(k, Variable): k = k.data if not isinstance(k, BASIC_INDEXING_TYPES): k = np.asarray(k) if k.size == 0: # Slice by empty list; numpy could not infer the dtype k = k.astype(int) elif k.dtype.kind == "b": (k,) = np.nonzero(k) new_key.append(k) return dims, OuterIndexer(tuple(new_key)), None def _nonzero(self): """ Equivalent numpy's nonzero but returns a tuple of Varibles. """ # TODO we should replace dask's native nonzero # after https://github.com/dask/dask/issues/1076 is implemented. 
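# Illustrative sketch (hypothetical values, added as a comment): for a
# Variable with dims ("x", "y") and data [[0, 1], [2, 0]], np.nonzero gives
# (array([0, 1]), array([1, 0])), which the line below wraps into one
# 1-dimensional Variable per dimension so the result can feed directly into
# vectorized indexing.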
nonzeros = np.nonzero(self.data) return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims)) def _broadcast_indexes_vectorized(self, key): variables = [] out_dims_set = OrderedSet() for dim, value in zip(self.dims, key): if isinstance(value, slice): out_dims_set.add(dim) else: variable = ( value if isinstance(value, Variable) else as_variable(value, name=dim) ) if variable.dtype.kind == "b": # boolean indexing case (variable,) = variable._nonzero() variables.append(variable) out_dims_set.update(variable.dims) variable_dims = set() for variable in variables: variable_dims.update(variable.dims) slices = [] for i, (dim, value) in enumerate(zip(self.dims, key)): if isinstance(value, slice): if dim in variable_dims: # We only convert slice objects to variables if they share # a dimension with at least one other variable. Otherwise, # we can equivalently leave them as slices aknd transpose # the result. This is significantly faster/more efficient # for most array backends. values = np.arange(*value.indices(self.sizes[dim])) variables.insert(i - len(slices), Variable((dim,), values)) else: slices.append((i, value)) try: variables = _broadcast_compat_variables(*variables) except ValueError: raise IndexError(f"Dimensions of indexers mismatch: {key}") out_key = [variable.data for variable in variables] out_dims = tuple(out_dims_set) slice_positions = set() for i, value in slices: out_key.insert(i, value) new_position = out_dims.index(self.dims[i]) slice_positions.add(new_position) if slice_positions: new_order = [i for i in range(len(out_dims)) if i not in slice_positions] else: new_order = None return out_dims, VectorizedIndexer(tuple(out_key)), new_order def __getitem__(self: VariableType, key) -> VariableType: """Return a new Variable object whose contents are consistent with getting the provided key from the underlying data. NB. __getitem__ and __setitem__ implement xarray-style indexing, where if keys are unlabeled arrays, we index the array orthogonally with them. If keys are labeled array (such as Variables), they are broadcasted with our usual scheme and then the array is indexed with the broadcasted key, like numpy's fancy indexing. If you really want to do indexing like `x[x > 0]`, manipulate the numpy array `x.values` directly. """ dims, indexer, new_order = self._broadcast_indexes(key) data = as_indexable(self._data)[indexer] if new_order: data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType: """Used by IndexVariable to return IndexVariable objects when possible.""" return type(self)(dims, data, self._attrs, self._encoding, fastpath=True) def _getitem_with_mask(self, key, fill_value=dtypes.NA): """Index this Variable with -1 remapped to fill_value.""" # TODO(shoyer): expose this method in public API somewhere (isel?) and # use it for reindex. # TODO(shoyer): add a sanity check that all other integers are # non-negative # TODO(shoyer): add an optimization, remapping -1 to an adjacent value # that is actually indexed rather than mapping it to the last value # along each axis. 
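# Behavioural sketch (hypothetical values, added as a comment):
# Variable("x", [10, 20, 30])._getitem_with_mask([0, -1, 2]) is expected to
# yield data equivalent to [10., nan, 30.]: the -1 entry is treated as masked
# and replaced by fill_value (NaN after dtype promotion), rather than meaning
# "last element" as it would in plain NumPy indexing.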
if fill_value is dtypes.NA: fill_value = dtypes.get_fill_value(self.dtype) dims, indexer, new_order = self._broadcast_indexes(key) if self.size: if is_duck_dask_array(self._data): # dask's indexing is faster this way; also vindex does not # support negative indices yet: # https://github.com/dask/dask/pull/2967 actual_indexer = indexing.posify_mask_indexer(indexer) else: actual_indexer = indexer data = as_indexable(self._data)[actual_indexer] mask = indexing.create_mask(indexer, self.shape, data) # we need to invert the mask in order to pass data first. This helps # pint to choose the correct unit # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed data = duck_array_ops.where(np.logical_not(mask), data, fill_value) else: # array cannot be indexed along dimensions of size 0, so just # build the mask directly instead. mask = indexing.create_mask(indexer, self.shape) data = np.broadcast_to(fill_value, getattr(mask, "shape", ())) if new_order: data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) def __setitem__(self, key, value): """__setitem__ is overloaded to access the underlying numpy values with orthogonal indexing. See __getitem__ for more details. """ dims, index_tuple, new_order = self._broadcast_indexes(key) if not isinstance(value, Variable): value = as_compatible_data(value) if value.ndim > len(dims): raise ValueError( "shape mismatch: value array of shape %s could not be " "broadcast to indexing result with %s dimensions" % (value.shape, len(dims)) ) if value.ndim == 0: value = Variable((), value) else: value = Variable(dims[-value.ndim :], value) # broadcast to become assignable value = value.set_dims(dims).data if new_order: value = duck_array_ops.asarray(value) value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)] value = duck_array_ops.moveaxis(value, new_order, range(len(new_order))) indexable = as_indexable(self._data) indexable[index_tuple] = value @property def attrs(self) -> Dict[Hashable, Any]: """Dictionary of local attributes on this variable.""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) @property def encoding(self): """Dictionary of encodings on this variable.""" if self._encoding is None: self._encoding = {} return self._encoding @encoding.setter def encoding(self, value): try: self._encoding = dict(value) except ValueError: raise ValueError("encoding must be castable to a dictionary") def copy(self, deep=True, data=None): """Returns a copy of this object. If `deep=True`, the data array is loaded into memory and copied onto the new object. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. 
Examples -------- Shallow copy versus deep copy >>> var = xr.Variable(data=[1, 2, 3], dims="x") >>> var.copy() <xarray.Variable (x: 3)> array([1, 2, 3]) >>> var_0 = var.copy(deep=False) >>> var_0[0] = 7 >>> var_0 <xarray.Variable (x: 3)> array([7, 2, 3]) >>> var <xarray.Variable (x: 3)> array([7, 2, 3]) Changing the data using the ``data`` argument maintains the structure of the original object, but with the new data. Original object is unaffected. >>> var.copy(data=[0.1, 0.2, 0.3]) <xarray.Variable (x: 3)> array([0.1, 0.2, 0.3]) >>> var <xarray.Variable (x: 3)> array([7, 2, 3]) See Also -------- pandas.DataFrame.copy """ if data is None: data = self._data if isinstance(data, indexing.MemoryCachedArray): # don't share caching between copies data = indexing.MemoryCachedArray(data.array) if deep: data = copy.deepcopy(data) else: data = as_compatible_data(data) if self.shape != data.shape: raise ValueError( "Data shape {} must match shape of object {}".format( data.shape, self.shape ) ) # note: # dims is already an immutable tuple # attributes and encoding will be copied when the new Array is created return self._replace(data=data) def _replace( self, dims=_default, data=_default, attrs=_default, encoding=_default ) -> "Variable": if dims is _default: dims = copy.copy(self._dims) if data is _default: data = copy.copy(self.data) if attrs is _default: attrs = copy.copy(self._attrs) if encoding is _default: encoding = copy.copy(self._encoding) return type(self)(dims, data, attrs, encoding, fastpath=True) def __copy__(self): return self.copy(deep=False) def __deepcopy__(self, memo=None): # memo does nothing but is required for compatibility with # copy.deepcopy return self.copy(deep=True) # mutable objects should not be hashable # https://github.com/python/mypy/issues/4266 __hash__ = None # type: ignore @property def chunks(self): """Block dimensions for this array's data or None if it's not a dask array. """ return getattr(self._data, "chunks", None) _array_counter = itertools.count() def chunk(self, chunks={}, name=None, lock=False): """Coerce this array's data into a dask arrays with the given chunks. If this variable is a non-dask array, it will be converted to dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Parameters ---------- chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. name : str, optional Used to generate the name for this array in the internal dask graph. Does not need not be unique. lock : optional Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. Returns ------- chunked : xarray.Variable """ import dask import dask.array as da if chunks is None: warnings.warn( "None value for 'chunks' is deprecated. " "It will raise an error in the future. Use instead '{}'", category=FutureWarning, ) chunks = {} if utils.is_dict_like(chunks): chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()} data = self._data if is_duck_dask_array(data): data = data.rechunk(chunks) else: if isinstance(data, indexing.ExplicitlyIndexed): # Unambiguously handle array storage backends (like NetCDF4 and h5py) # that can't handle general array indexing. 
For example, in netCDF4 you # can do "outer" indexing along two dimensions independent, which works # differently from how NumPy handles it. # da.from_array works by using lazy indexing with a tuple of slices. # Using OuterIndexer is a pragmatic choice: dask does not yet handle # different indexing types in an explicit way: # https://github.com/dask/dask/issues/2883 data = indexing.ImplicitToExplicitIndexingAdapter( data, indexing.OuterIndexer ) if LooseVersion(dask.__version__) < "2.0.0": kwargs = {} else: # All of our lazily loaded backend array classes should use NumPy # array operations. kwargs = {"meta": np.ndarray} else: kwargs = {} if utils.is_dict_like(chunks): chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape)) data = da.from_array(data, chunks, name=name, lock=lock, **kwargs) return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True) def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA): """ use sparse-array as backend. """ import sparse # TODO: what to do if dask-backended? if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = dtypes.result_type(self.dtype, fill_value) if sparse_format is _default: sparse_format = "coo" try: as_sparse = getattr(sparse, f"as_{sparse_format.lower()}") except AttributeError: raise ValueError(f"{sparse_format} is not a valid sparse format") data = as_sparse(self.data.astype(dtype), fill_value=fill_value) return self._replace(data=data) def _to_dense(self): """ Change backend from sparse to np.array """ if hasattr(self._data, "todense"): return self._replace(data=self._data.todense()) return self.copy(deep=False) def isel( self: VariableType, indexers: Mapping[Hashable, Any] = None, missing_dims: str = "raise", **indexers_kwargs: Any, ) -> VariableType: """Return a new array indexed along the specified dimension(s). Parameters ---------- **indexers : {dim: indexer, ...} Keyword arguments with names matching dimensions and values given by integers, slice objects or arrays. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warning": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- obj : Array object A new Array with the selected data and dimensions. In general, the new variable's data will be a view of this variable's data, unless numpy fancy indexing was triggered by using an array indexer, in which case the data will be a copy. """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) key = tuple(indexers.get(dim, slice(None)) for dim in self.dims) return self[key] def squeeze(self, dim=None): """Return a new object with squeezed data. Parameters ---------- dim : None or str or tuple of str, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. Returns ------- squeezed : same type as caller This object, but with with all or a subset of the dimensions of length 1 removed. 
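A small sketch (hypothetical variable, not one of the tested examples):

>>> xr.Variable(("x", "y"), [[1, 2, 3]]).squeeze().dims  # doctest: +SKIP
('y',)

Only the length-1 dimension ``x`` is removed; selecting a dimension whose
length is greater than one raises an error, as described above.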
See Also -------- numpy.squeeze """ dims = common.get_squeeze_dims(self, dim) return self.isel({d: 0 for d in dims}) def _shift_one_dim(self, dim, count, fill_value=dtypes.NA): axis = self.get_axis_num(dim) if count > 0: keep = slice(None, -count) elif count < 0: keep = slice(-count, None) else: keep = slice(None) trimmed_data = self[(slice(None),) * axis + (keep,)].data if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype width = min(abs(count), self.shape[axis]) dim_pad = (width, 0) if count >= 0 else (0, width) pads = [(0, 0) if d != dim else dim_pad for d in self.dims] data = duck_array_ops.pad( trimmed_data.astype(dtype), pads, mode="constant", constant_values=fill_value, ) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return type(self)(self.dims, data, self._attrs, fastpath=True) def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs): """ Return a new Variable with shifted data. Parameters ---------- shifts : mapping of the form {dim: offset} Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value: scalar, optional Value to use for newly missing values **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but shifted data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift") result = self for dim, count in shifts.items(): result = result._shift_one_dim(dim, count, fill_value=fill_value) return result def _pad_options_dim_to_index( self, pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]], fill_with_shape=False, ): if fill_with_shape: return [ (n, n) if d not in pad_option else pad_option[d] for d, n in zip(self.dims, self.data.shape) ] return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims] def pad( self, pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None, mode: str = "constant", stat_length: Union[ int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] ] = None, constant_values: Union[ int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] ] = None, end_values: Union[ int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] ] = None, reflect_type: str = None, **pad_width_kwargs: Any, ): """ Return a new Variable with padded data. Parameters ---------- pad_width : mapping of hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : str, default: "constant" See numpy / Dask docs stat_length : int, tuple or mapping of hashable to tuple Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. constant_values : scalar, tuple or mapping of hashable to tuple Used in 'constant'. The values to set the padded values for each axis. end_values : scalar, tuple or mapping of hashable to tuple Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. reflect_type : {"even", "odd"}, optional Used in "reflect", and "symmetric". 
The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. **pad_width_kwargs One of pad_width or pad_width_kwargs must be provided. Returns ------- padded : Variable Variable with the same dimensions and attributes but padded data. """ pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad") # change default behaviour of pad with mode constant if mode == "constant" and ( constant_values is None or constant_values is dtypes.NA ): dtype, constant_values = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype # create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty if isinstance(stat_length, dict): stat_length = self._pad_options_dim_to_index( stat_length, fill_with_shape=True ) if isinstance(constant_values, dict): constant_values = self._pad_options_dim_to_index(constant_values) if isinstance(end_values, dict): end_values = self._pad_options_dim_to_index(end_values) # workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303 if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]: stat_length = [(n, n) for n in self.data.shape] # type: ignore # change integer values to a tuple of two of those values and change pad_width to index for k, v in pad_width.items(): if isinstance(v, numbers.Number): pad_width[k] = (v, v) pad_width_by_index = self._pad_options_dim_to_index(pad_width) # create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty pad_option_kwargs = {} if stat_length is not None: pad_option_kwargs["stat_length"] = stat_length if constant_values is not None: pad_option_kwargs["constant_values"] = constant_values if end_values is not None: pad_option_kwargs["end_values"] = end_values if reflect_type is not None: pad_option_kwargs["reflect_type"] = reflect_type # type: ignore array = duck_array_ops.pad( self.data.astype(dtype, copy=False), pad_width_by_index, mode=mode, **pad_option_kwargs, ) return type(self)(self.dims, array) def _roll_one_dim(self, dim, count): axis = self.get_axis_num(dim) count %= self.shape[axis] if count != 0: indices = [slice(-count, None), slice(None, -count)] else: indices = [slice(None)] arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices] data = duck_array_ops.concatenate(arrays, axis) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return type(self)(self.dims, data, self._attrs, fastpath=True) def roll(self, shifts=None, **shifts_kwargs): """ Return a new Variable with rolld data. Parameters ---------- shifts : mapping of hashable to int Integer offset to roll along each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but rolled data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll") result = self for dim, count in shifts.items(): result = result._roll_one_dim(dim, count) return result def transpose(self, *dims) -> "Variable": """Return a new Variable object with transposed dimensions. 
Parameters ---------- *dims : str, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. Returns ------- transposed : Variable The returned object has transposed data and dimensions with the same attributes as the original. Notes ----- This operation returns a view of this variable's data. It is lazy for dask-backed Variables but not for numpy-backed Variables. See Also -------- numpy.transpose """ if len(dims) == 0: dims = self.dims[::-1] dims = tuple(infix_dims(dims, self.dims)) axes = self.get_axis_num(dims) if len(dims) < 2 or dims == self.dims: # no need to transpose if only one dimension # or dims are in same order return self.copy(deep=False) data = as_indexable(self._data).transpose(axes) return type(self)(dims, data, self._attrs, self._encoding, fastpath=True) @property def T(self) -> "Variable": return self.transpose() def set_dims(self, dims, shape=None): """Return a new variable with given set of dimensions. This method might be used to attach new dimension(s) to variable. When possible, this operation does not copy this variable's data. Parameters ---------- dims : str or sequence of str or dict Dimensions to include on the new variable. If a dict, values are used to provide the sizes of new dimensions; otherwise, new dimensions are inserted with length 1. Returns ------- Variable """ if isinstance(dims, str): dims = [dims] if shape is None and utils.is_dict_like(dims): shape = dims.values() missing_dims = set(self.dims) - set(dims) if missing_dims: raise ValueError( "new dimensions %r must be a superset of " "existing dimensions %r" % (dims, self.dims) ) self_dims = set(self.dims) expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims if self.dims == expanded_dims: # don't use broadcast_to unless necessary so the result remains # writeable if possible expanded_data = self.data elif shape is not None: dims_map = dict(zip(dims, shape)) tmp_shape = tuple(dims_map[d] for d in expanded_dims) expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape) else: expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)] expanded_var = Variable( expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True ) return expanded_var.transpose(*dims) def _stack_once(self, dims, new_dim): if not set(dims) <= set(self.dims): raise ValueError("invalid existing dimensions: %s" % dims) if new_dim in self.dims: raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if len(dims) == 0: # don't stack return self.copy(deep=False) other_dims = [d for d in self.dims if d not in dims] dim_order = other_dims + list(dims) reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + (-1,) new_data = reordered.data.reshape(new_shape) new_dims = reordered.dims[: len(other_dims)] + (new_dim,) return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True) def stack(self, dimensions=None, **dimensions_kwargs): """ Stack any number of existing dimensions into a single new dimension. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dimensions : mapping of hashable to tuple of hashable Mapping of form new_name=(dim1, dim2, ...) describing the names of new dimensions, and the existing dimensions that they replace. **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. 
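A minimal sketch (hypothetical variable, not one of the tested examples):

>>> v = xr.Variable(("x", "y"), [[0, 1], [2, 3]])  # doctest: +SKIP
>>> v.stack(z=("x", "y")).shape  # doctest: +SKIP
(4,)

The stacked ``z`` dimension enumerates the original elements in C (row-major)
order, i.e. ``[0, 1, 2, 3]`` for this example.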
Returns ------- stacked : Variable Variable with the same attributes but stacked data. See also -------- Variable.unstack """ dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack") result = self for new_dim, dims in dimensions.items(): result = result._stack_once(dims, new_dim) return result def _unstack_once(self, dims, old_dim): new_dim_names = tuple(dims.keys()) new_dim_sizes = tuple(dims.values()) if old_dim not in self.dims: raise ValueError("invalid existing dimension: %s" % old_dim) if set(new_dim_names).intersection(self.dims): raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if np.prod(new_dim_sizes) != self.sizes[old_dim]: raise ValueError( "the product of the new dimension sizes must " "equal the size of the old dimension" ) other_dims = [d for d in self.dims if d != old_dim] dim_order = other_dims + [old_dim] reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes new_data = reordered.data.reshape(new_shape) new_dims = reordered.dims[: len(other_dims)] + new_dim_names return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True) def unstack(self, dimensions=None, **dimensions_kwargs): """ Unstack an existing dimension into multiple new dimensions. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dimensions : mapping of hashable to mapping of hashable to int Mapping of the form old_dim={dim1: size1, ...} describing the names of existing dimensions, and the new dimensions and sizes that they map to. **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. Returns ------- unstacked : Variable Variable with the same attributes but unstacked data. See also -------- Variable.stack """ dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack") result = self for old_dim, dims in dimensions.items(): result = result._unstack_once(dims, old_dim) return result def fillna(self, value): return ops.fillna(self, value) def where(self, cond, other=dtypes.NA): return ops.where_method(self, cond, other) def reduce( self, func, dim=None, axis=None, keep_attrs=None, keepdims=False, **kwargs, ): """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. 
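A minimal sketch (hypothetical data, using ``np.mean`` as ``func``; not one
of the tested examples):

>>> v = xr.Variable(("x", "y"), [[1.0, 2.0], [3.0, 4.0]])  # doctest: +SKIP
>>> v.reduce(np.mean, dim="x").dims  # doctest: +SKIP
('y',)
>>> v.reduce(np.mean).dims  # doctest: +SKIP
()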
""" if dim == ...: dim = None if dim is not None and axis is not None: raise ValueError("cannot supply both 'axis' and 'dim' arguments") if dim is not None: axis = self.get_axis_num(dim) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) if axis is not None: data = func(self.data, axis=axis, **kwargs) else: data = func(self.data, **kwargs) if getattr(data, "shape", ()) == self.shape: dims = self.dims else: removed_axes = ( range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim ) if keepdims: # Insert np.newaxis for removed dims slices = tuple( np.newaxis if i in removed_axes else slice(None, None) for i in range(self.ndim) ) if getattr(data, "shape", None) is None: # Reduce has produced a scalar value, not an array-like data = np.asanyarray(data)[slices] else: data = data[slices] dims = self.dims else: dims = [ adim for n, adim in enumerate(self.dims) if n not in removed_axes ] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) attrs = self._attrs if keep_attrs else None return Variable(dims, data, attrs=attrs) @classmethod def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ if not isinstance(dim, str): (dim,) = dim.dims # can't do this lazily: we need to loop through variables at least # twice variables = list(variables) first_var = variables[0] arrays = [v.data for v in variables] if dim in first_var.dims: axis = first_var.get_axis_num(dim) dims = first_var.dims data = duck_array_ops.concatenate(arrays, axis=axis) if positions is not None: # TODO: deprecate this option -- we don't need it for groupby # any more. indices = nputils.inverse_permutation(np.concatenate(positions)) data = duck_array_ops.take(data, indices, axis=axis) else: axis = 0 dims = (dim,) + first_var.dims data = duck_array_ops.stack(arrays, axis=axis) attrs = dict(first_var.attrs) encoding = dict(first_var.encoding) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError( f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}" ) return cls(dims, data, attrs, encoding) def equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the same dimensions and values; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. 
This method is necessary because `v1 == v2` for Variables does element-wise comparisons (like numpy.ndarrays). """ other = getattr(other, "variable", other) try: return self.dims == other.dims and ( self._data is other._data or equiv(self.data, other.data) ) except (TypeError, AttributeError): return False def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the values after being broadcast against each other; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. """ try: self, other = broadcast_variables(self, other) except (ValueError, AttributeError): return False return self.equals(other, equiv=equiv) def identical(self, other, equiv=duck_array_ops.array_equiv): """Like equals, but also checks attributes.""" try: return utils.dict_equiv(self.attrs, other.attrs) and self.equals( other, equiv=equiv ) except (TypeError, AttributeError): return False def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv): """True if the intersection of two Variable's non-null data is equal; otherwise false. Variables can thus still be equal if there are locations where either, or both, contain NaN values. """ return self.broadcast_equals(other, equiv=equiv) def quantile( self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True ): """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- quantiles : Variable If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, pandas.Series.quantile, Dataset.quantile, DataArray.quantile """ from .computation import apply_ufunc _quantile_func = np.nanquantile if skipna else np.quantile if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) scalar = utils.is_scalar(q) q = np.atleast_1d(np.asarray(q, dtype=np.float64)) if dim is None: dim = self.dims if utils.is_scalar(dim): dim = [dim] def _wrapper(npa, **kwargs): # move quantile axis to end. 
required for apply_ufunc return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1) axis = np.arange(-1, -1 * len(dim) - 1, -1) result = apply_ufunc( _wrapper, self, input_core_dims=[dim], exclude_dims=set(dim), output_core_dims=[["quantile"]], output_dtypes=[np.float64], dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}), dask="parallelized", kwargs={"q": q, "axis": axis, "interpolation": interpolation}, ) # for backward compatibility result = result.transpose("quantile", ...) if scalar: result = result.squeeze("quantile") if keep_attrs: result.attrs = self._attrs return result def rank(self, dim, pct=False): """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : str Dimension over which to compute rank. pct : bool, optional If True, compute percentage ranks, otherwise compute integer ranks. Returns ------- ranked : Variable See Also -------- Dataset.rank, DataArray.rank """ import bottleneck as bn data = self.data if is_duck_dask_array(data): raise TypeError( "rank does not work for arrays stored as dask " "arrays. Load the data via .compute() or .load() " "prior to calling this method." ) elif not isinstance(data, np.ndarray): raise TypeError( "rank is not implemented for {} objects.".format(type(data)) ) axis = self.get_axis_num(dim) func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata ranked = func(data, axis=axis) if pct: count = np.sum(~np.isnan(data), axis=axis, keepdims=True) ranked /= count return Variable(self.dims, ranked) def rolling_window( self, dim, window, window_dim, center=False, fill_value=dtypes.NA ): """ Make a rolling_window along dim and add a new_dim to the last place. Parameters ---------- dim : str Dimension over which to compute rolling_window. For nd-rolling, should be list of dimensions. window : int Window size of the rolling For nd-rolling, should be list of integers. window_dim : str New name of the window dimension. For nd-rolling, should be list of integers. center : bool, default: False If True, pad fill_value for both ends. Otherwise, pad in the head of the axis. fill_value value to be filled. Returns ------- Variable that is a view of the original array with a added dimension of size w. 
The return dim: self.dims + (window_dim, ) The return shape: self.shape + (window, ) Examples -------- >>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4))) >>> v.rolling_window("b", 3, "window_dim") <xarray.Variable (a: 2, b: 4, window_dim: 3)> array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], <BLANKLINE> [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) >>> v.rolling_window("b", 3, "window_dim", center=True) <xarray.Variable (a: 2, b: 4, window_dim: 3)> array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], [ 2., 3., nan]], <BLANKLINE> [[nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.], [ 6., 7., nan]]]) """ if fill_value is dtypes.NA: # np.nan is passed dtype, fill_value = dtypes.maybe_promote(self.dtype) array = self.astype(dtype, copy=False).data else: dtype = self.dtype array = self.data if isinstance(dim, list): assert len(dim) == len(window) assert len(dim) == len(window_dim) assert len(dim) == len(center) else: dim = [dim] window = [window] window_dim = [window_dim] center = [center] axis = [self.get_axis_num(d) for d in dim] new_dims = self.dims + tuple(window_dim) return Variable( new_dims, duck_array_ops.rolling_window( array, axis=axis, window=window, center=center, fill_value=fill_value ), ) def coarsen( self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs ): """ Apply reduction function. """ windows = {k: v for k, v in windows.items() if k in self.dims} if not windows: return self.copy() if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) if keep_attrs: _attrs = self.attrs else: _attrs = None reshaped, axes = self._coarsen_reshape(windows, boundary, side) if isinstance(func, str): name = func func = getattr(duck_array_ops, name, None) if func is None: raise NameError(f"{name} is not a valid method.") return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs) def _coarsen_reshape(self, windows, boundary, side): """ Construct a reshaped-array for coarsen """ if not utils.is_dict_like(boundary): boundary = {d: boundary for d in windows.keys()} if not utils.is_dict_like(side): side = {d: side for d in windows.keys()} # remove unrelated dimensions boundary = {k: v for k, v in boundary.items() if k in windows} side = {k: v for k, v in side.items() if k in windows} for d, window in windows.items(): if window <= 0: raise ValueError(f"window must be > 0. Given {window}") variable = self for d, window in windows.items(): # trim or pad the object size = variable.shape[self._get_axis_num(d)] n = int(size / window) if boundary[d] == "exact": if n * window != size: raise ValueError( "Could not coarsen a dimension of size {} with " "window {}".format(size, window) ) elif boundary[d] == "trim": if side[d] == "left": variable = variable.isel({d: slice(0, window * n)}) else: excess = size - window * n variable = variable.isel({d: slice(excess, None)}) elif boundary[d] == "pad": # pad pad = window * n - size if pad < 0: pad += window if side[d] == "left": pad_width = {d: (0, pad)} else: pad_width = {d: (pad, 0)} variable = variable.pad(pad_width, mode="constant") else: raise TypeError( "{} is invalid for boundary. 
Valid option is 'exact', " "'trim' and 'pad'".format(boundary[d]) ) shape = [] axes = [] axis_count = 0 for i, d in enumerate(variable.dims): if d in windows: size = variable.shape[i] shape.append(int(size / windows[d])) shape.append(windows[d]) axis_count += 1 axes.append(i + axis_count) else: shape.append(variable.shape[i]) return variable.data.reshape(shape), tuple(axes) def isnull(self, keep_attrs: bool = None): """Test each value in the array for whether it is a missing value. Returns ------- isnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.isnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var <xarray.Variable (x: 3)> array([ 1., nan, 3.]) >>> var.isnull() <xarray.Variable (x: 3)> array([False, True, False]) """ from .computation import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.isnull, self, dask="allowed", keep_attrs=keep_attrs, ) def notnull(self, keep_attrs: bool = None): """Test each value in the array for whether it is not a missing value. Returns ------- notnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.notnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var <xarray.Variable (x: 3)> array([ 1., nan, 3.]) >>> var.notnull() <xarray.Variable (x: 3)> array([ True, False, True]) """ from .computation import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.notnull, self, dask="allowed", keep_attrs=keep_attrs, ) @property def real(self): return type(self)(self.dims, self.data.real, self._attrs) @property def imag(self): return type(self)(self.dims, self.data.imag, self._attrs) def __array_wrap__(self, obj, context=None): return Variable(self.dims, obj) @staticmethod def _unary_op(f): @functools.wraps(f) def func(self, *args, **kwargs): keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with np.errstate(all="ignore"): result = self.__array_wrap__(f(self.data, *args, **kwargs)) if keep_attrs: result.attrs = self.attrs return result return func @staticmethod def _binary_op(f, reflexive=False, **ignored_kwargs): @functools.wraps(f) def func(self, other): if isinstance(other, (xr.DataArray, xr.Dataset)): return NotImplemented self_data, other_data, dims = _broadcast_compat_data(self, other) keep_attrs = _get_keep_attrs(default=False) attrs = self._attrs if keep_attrs else None with np.errstate(all="ignore"): new_data = ( f(self_data, other_data) if not reflexive else f(other_data, self_data) ) result = Variable(dims, new_data, attrs=attrs) return result return func @staticmethod def _inplace_binary_op(f): @functools.wraps(f) def func(self, other): if isinstance(other, xr.Dataset): raise TypeError("cannot add a Dataset to a Variable in-place") self_data, other_data, dims = _broadcast_compat_data(self, other) if dims != self.dims: raise ValueError("dimensions cannot change for in-place operations") with np.errstate(all="ignore"): self.values = f(self_data, other_data) return self return func def _to_numeric(self, offset=None, datetime_unit=None, dtype=float): """A (private) method to convert datetime array to numeric dtype See duck_array_ops.datetime_to_numeric """ numeric_array = duck_array_ops.datetime_to_numeric( self.data, offset, datetime_unit, dtype ) return type(self)(self.dims, numeric_array, self._attrs) def _unravel_argminmax( 
self, argminmax: str, dim: Union[Hashable, Sequence[Hashable], None], axis: Union[int, None], keep_attrs: Optional[bool], skipna: Optional[bool], ) -> Union["Variable", Dict[Hashable, "Variable"]]: """Apply argmin or argmax over one or more dimensions, returning the result as a dict of DataArray that can be passed directly to isel. """ if dim is None and axis is None: warnings.warn( "Behaviour of argmin/argmax with neither dim nor axis argument will " "change to return a dict of indices of each dimension. To get a " "single, flat index, please use np.argmin(da.data) or " "np.argmax(da.data) instead of da.argmin() or da.argmax().", DeprecationWarning, stacklevel=3, ) argminmax_func = getattr(duck_array_ops, argminmax) if dim is ...: # In future, should do this also when (dim is None and axis is None) dim = self.dims if ( dim is None or axis is not None or not isinstance(dim, Sequence) or isinstance(dim, str) ): # Return int index if single dimension is passed, and is not part of a # sequence return self.reduce( argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna ) # Get a name for the new dimension that does not conflict with any existing # dimension newdimname = "_unravel_argminmax_dim_0" count = 1 while newdimname in self.dims: newdimname = f"_unravel_argminmax_dim_{count}" count += 1 stacked = self.stack({newdimname: dim}) result_dims = stacked.dims[:-1] reduce_shape = tuple(self.sizes[d] for d in dim) result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna) result_unravelled_indices = duck_array_ops.unravel_index( result_flat_indices.data, reduce_shape ) result = { d: Variable(dims=result_dims, data=i) for d, i in zip(dim, result_unravelled_indices) } if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) if keep_attrs: for v in result.values(): v.attrs = self.attrs return result def argmin( self, dim: Union[Hashable, Sequence[Hashable]] = None, axis: int = None, keep_attrs: bool = None, skipna: bool = None, ) -> Union["Variable", Dict[Hashable, "Variable"]]: """Index or indices of the minimum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : hashable, sequence of hashable or ..., optional The dimensions over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). 
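A minimal sketch (hypothetical data; ``dim=...`` asks for indices over every
dimension; not one of the tested examples):

>>> v = xr.Variable(("x", "y"), [[3, 1], [2, 0]])  # doctest: +SKIP
>>> {d: int(idx.data) for d, idx in v.argmin(dim=...).items()}  # doctest: +SKIP
{'x': 1, 'y': 1}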
Returns ------- result : Variable or dict of Variable See also -------- DataArray.argmin, DataArray.idxmin """ return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna) def argmax( self, dim: Union[Hashable, Sequence[Hashable]] = None, axis: int = None, keep_attrs: bool = None, skipna: bool = None, ) -> Union["Variable", Dict[Hashable, "Variable"]]: """Index or indices of the maximum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : hashable, sequence of hashable or ..., optional The dimensions over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See also -------- DataArray.argmax, DataArray.idxmax """ return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna) ops.inject_all_ops_and_reduce_methods(Variable) class IndexVariable(Variable): """Wrapper for accommodating a pandas.Index in an xarray.Variable. IndexVariable preserve loaded values in the form of a pandas.Index instead of a NumPy array. Hence, their values are immutable and must always be one- dimensional. They also have a name property, which is the name of their sole dimension unless another name is given. """ __slots__ = () def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False): super().__init__(dims, data, attrs, encoding, fastpath) if self.ndim != 1: raise ValueError("%s objects must be 1-dimensional" % type(self).__name__) # Unlike in Variable, always eagerly load values into memory if not isinstance(self._data, PandasIndexAdapter): self._data = PandasIndexAdapter(self._data) def __dask_tokenize__(self): from dask.base import normalize_token # Don't waste time converting pd.Index to np.ndarray return normalize_token((type(self), self._dims, self._data.array, self._attrs)) def load(self): # data is already loaded into memory for IndexVariable return self # https://github.com/python/mypy/issues/1465 @Variable.data.setter # type: ignore def data(self, data): raise ValueError( f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate." ) @Variable.values.setter # type: ignore def values(self, values): raise ValueError( f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate." 
) def chunk(self, chunks={}, name=None, lock=False): # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk() return self.copy(deep=False) def _as_sparse(self, sparse_format=_default, fill_value=_default): # Dummy return self.copy(deep=False) def _to_dense(self): # Dummy return self.copy(deep=False) def _finalize_indexing_result(self, dims, data): if getattr(data, "ndim", 0) != 1: # returns Variable rather than IndexVariable if multi-dimensional return Variable(dims, data, self._attrs, self._encoding) else: return type(self)(dims, data, self._attrs, self._encoding, fastpath=True) def __setitem__(self, key, value): raise TypeError("%s values cannot be modified" % type(self).__name__) @classmethod def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): """Specialized version of Variable.concat for IndexVariable objects. This exists because we want to avoid converting Index objects to NumPy arrays, if possible. """ if not isinstance(dim, str): (dim,) = dim.dims variables = list(variables) first_var = variables[0] if any(not isinstance(v, cls) for v in variables): raise TypeError( "IndexVariable.concat requires that all input " "variables be IndexVariable objects" ) indexes = [v._data.array for v in variables] if not indexes: data = [] else: data = indexes[0].append(indexes[1:]) if positions is not None: indices = nputils.inverse_permutation(np.concatenate(positions)) data = data.take(indices) attrs = dict(first_var.attrs) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError("inconsistent dimensions") utils.remove_incompatible_items(attrs, var.attrs) return cls(first_var.dims, data, attrs) def copy(self, deep=True, data=None): """Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Deep is ignored when data is given. Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. """ if data is None: data = self._data.copy(deep=deep) else: data = as_compatible_data(data) if self.shape != data.shape: raise ValueError( "Data shape {} must match shape of object {}".format( data.shape, self.shape ) ) return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True) def equals(self, other, equiv=None): # if equiv is specified, super up if equiv is not None: return super().equals(other, equiv) # otherwise use the native index equals, rather than looking at _data other = getattr(other, "variable", other) try: return self.dims == other.dims and self._data_equals(other) except (TypeError, AttributeError): return False def _data_equals(self, other): return self.to_index().equals(other.to_index()) def to_index_variable(self): """Return this variable as an xarray.IndexVariable""" return self to_coord = utils.alias(to_index_variable, "to_coord") def to_index(self): """Convert this variable to a pandas.Index""" # n.b. 
creating a new pandas.Index from an old pandas.Index is # basically free as pandas.Index objects are immutable assert self.ndim == 1 index = self._data.array if isinstance(index, pd.MultiIndex): # set default names for multi-index unnamed levels so that # we can safely rename dimension / coordinate later valid_level_names = [ name or "{}_level_{}".format(self.dims[0], i) for i, name in enumerate(index.names) ] index = index.set_names(valid_level_names) else: index = index.set_names(self.name) return index @property def level_names(self): """Return MultiIndex level names or None if this IndexVariable has no MultiIndex. """ index = self.to_index() if isinstance(index, pd.MultiIndex): return index.names else: return None def get_level_variable(self, level): """Return a new IndexVariable from a given MultiIndex level.""" if self.level_names is None: raise ValueError("IndexVariable %r has no MultiIndex" % self.name) index = self.to_index() return type(self)(self.dims, index.get_level_values(level)) @property def name(self): return self.dims[0] @name.setter def name(self, value): raise AttributeError("cannot modify name of IndexVariable in-place") # for backwards compatibility Coordinate = utils.alias(IndexVariable, "Coordinate") def _unified_dims(variables): # validate dimensions all_dims = {} for var in variables: var_dims = var.dims if len(set(var_dims)) < len(var_dims): raise ValueError( "broadcasting cannot handle duplicate " "dimensions: %r" % list(var_dims) ) for d, s in zip(var_dims, var.shape): if d not in all_dims: all_dims[d] = s elif all_dims[d] != s: raise ValueError( "operands cannot be broadcast together " "with mismatched lengths for dimension %r: %s" % (d, (all_dims[d], s)) ) return all_dims def _broadcast_compat_variables(*variables): """Create broadcast compatible variables, with the same dimensions. Unlike the result of broadcast_variables(), some variables may have dimensions of size 1 instead of the the size of the broadcast dimension. """ dims = tuple(_unified_dims(variables)) return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables) def broadcast_variables(*variables): """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple( var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables ) def _broadcast_compat_data(self, other): if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]): # `other` satisfies the necessary Variable API for broadcast_variables new_self, new_other = _broadcast_compat_variables(self, other) self_data = new_self.data other_data = new_other.data dims = new_self.dims else: # rely on numpy broadcasting rules self_data = self.data other_data = other dims = self.dims return self_data, other_data, dims def concat(variables, dim="concat_dim", positions=None, shortcut=False): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. 
dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ variables = list(variables) if all(isinstance(v, IndexVariable) for v in variables): return IndexVariable.concat(variables, dim, positions, shortcut) else: return Variable.concat(variables, dim, positions, shortcut) def assert_unique_multiindex_level_names(variables): """Check for uniqueness of MultiIndex level names in all given variables. Not public API. Used for checking consistency of DataArray and Dataset objects. """ level_names = defaultdict(list) all_level_names = set() for var_name, var in variables.items(): if isinstance(var._data, PandasIndexAdapter): idx_level_names = var.to_index_variable().level_names if idx_level_names is not None: for n in idx_level_names: level_names[n].append(f"{n!r} ({var_name})") if idx_level_names: all_level_names.update(idx_level_names) for k, v in level_names.items(): if k in variables: v.append("(%s)" % k) duplicate_names = [v for v in level_names.values() if len(v) > 1] if duplicate_names: conflict_str = "\n".join(", ".join(v) for v in duplicate_names) raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str) # Check confliction between level names and dimensions GH:2299 for k, v in variables.items(): for d in v.dims: if d in all_level_names: raise ValueError( "conflicting level / dimension names. {} " "already exists as a level name.".format(d) )
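The argmin/argmax docstrings in the file above note that passing a sequence of dimensions returns a dict of index variables that can be fed straight to isel(). A minimal usage sketch at the DataArray level, assuming a recent xarray release that supports the multi-dimension argmax API (the array values and the dimension names "x" and "y" are hypothetical):

import numpy as np
import xarray as xr

# 2-D example array with named dimensions "x" and "y".
da = xr.DataArray(np.array([[1.0, 5.0], [3.0, 2.0]]), dims=("x", "y"))

# Reducing over a single dimension returns integer indices along that axis.
idx_along_y = da.argmax(dim="y")      # indices [1, 0] along dim "x"

# Passing a sequence of dimensions returns a dict of index variables,
# which can be passed directly to isel() to pull out the maximum itself.
idx = da.argmax(dim=["x", "y"])       # e.g. {"x": 0, "y": 1}
print(da.isel(idx).item())            # 5.0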
36.664265
124
0.589674
12,480
101,780
4.697596
0.086699
0.011053
0.005322
0.003002
0.36257
0.309948
0.276226
0.248729
0.225327
0.214035
0
0.004336
0.3247
101,780
2,775
125
36.677477
0.848629
0.358666
0
0.292683
0
0.002152
0.075808
0.003862
0
0
0
0.003604
0.003587
1
0.090387
false
0.001435
0.020803
0.016499
0.218077
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a70ca1b5958248a2b51b4d49a2d791ec9ec77e7
36,386
py
Python
pyamf/tests/test_util.py
bulutistan/Py3AMF
3de53095b52fe2bf82b69ba5ad0b894b53045f7e
[ "MIT" ]
42
2017-04-17T11:40:25.000Z
2021-09-19T09:59:31.000Z
pyamf/tests/test_util.py
bulutistan/Py3AMF
3de53095b52fe2bf82b69ba5ad0b894b53045f7e
[ "MIT" ]
8
2017-07-27T07:39:30.000Z
2021-10-19T09:49:09.000Z
pyamf/tests/test_util.py
bulutistan/Py3AMF
3de53095b52fe2bf82b69ba5ad0b894b53045f7e
[ "MIT" ]
15
2017-05-16T12:46:33.000Z
2021-09-20T02:30:57.000Z
# -*- coding: utf-8 -*- # # Copyright (c) The PyAMF Project. # See LICENSE.txt for details. """ Tests for AMF utilities. @since: 0.1.0 """ import unittest from datetime import datetime from io import BytesIO import pyamf from pyamf import util from pyamf.tests.util import replace_dict PosInf = 1e300000 NegInf = -1e300000 NaN = PosInf / PosInf def isNaN(val): return str(float(val)) == str(NaN) def isPosInf(val): return str(float(val)) == str(PosInf) def isNegInf(val): return str(float(val)) == str(NegInf) class TimestampTestCase(unittest.TestCase): """ Test UTC timestamps. """ def test_get_timestamp(self): self.assertEqual( util.get_timestamp(datetime(2007, 11, 12)), 1194825600 ) def test_get_datetime(self): self.assertEqual(util.get_datetime(1194825600), datetime(2007, 11, 12)) def test_get_negative_datetime(self): self.assertEqual(util.get_datetime(-31536000), datetime(1969, 1, 1)) def test_preserved_microseconds(self): dt = datetime(2009, 3, 8, 23, 30, 47, 770122) ts = util.get_timestamp(dt) self.assertEqual(util.get_datetime(ts), dt) class StringIOTestCase(unittest.TestCase): def test_create(self): sp = util.BufferedByteStream() self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) self.assertEqual(sp.getvalue(), b'') sp = util.BufferedByteStream(None) self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('') self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('spam') self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'spam') self.assertEqual(len(sp), 4) sp = util.BufferedByteStream(BytesIO('this is a test'.encode())) self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'this is a test') self.assertEqual(len(sp), 14) self.assertRaises(TypeError, util.BufferedByteStream, self) def test_getvalue(self): sp = util.BufferedByteStream() sp.write('asdfasdf') self.assertEqual(sp.getvalue(), b'asdfasdf') sp.write('spam') self.assertEqual(sp.getvalue(), b'asdfasdfspam') def test_read(self): sp = util.BufferedByteStream('this is a test') self.assertEqual(len(sp), 14) self.assertEqual(sp.read(1), b't') self.assertEqual(sp.getvalue(), b'this is a test') self.assertEqual(len(sp), 14) self.assertEqual(sp.read(10), b'his is a t') self.assertEqual(sp.read(), b'est') def test_seek(self): sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.tell(), 0) # Relative to the beginning of the stream sp.seek(0, 0) self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'a') self.assertEqual(len(sp), 26) sp.seek(10, 0) self.assertEqual(sp.tell(), 10) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'k') self.assertEqual(len(sp), 26) sp.seek(-5, 1) self.assertEqual(sp.tell(), 6) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'g') self.assertEqual(len(sp), 26) sp.seek(-3, 2) self.assertEqual(sp.tell(), 23) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'x') self.assertEqual(len(sp), 26) def test_tell(self): sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(len(sp), 26) self.assertEqual(sp.tell(), 0) sp.read(1) 
self.assertEqual(sp.tell(), 1) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(len(sp), 26) sp.read(5) self.assertEqual(sp.tell(), 6) def test_truncate(self): sp = util.BufferedByteStream('abcdef') self.assertEqual(sp.getvalue(), b'abcdef') self.assertEqual(len(sp), 6) sp.truncate() self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('hello') self.assertEqual(sp.getvalue(), b'hello') self.assertEqual(len(sp), 5) sp.truncate(3) self.assertEqual(sp.getvalue(), b'hel') self.assertEqual(len(sp), 3) def test_write(self): sp = util.BufferedByteStream() self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) self.assertEqual(sp.tell(), 0) sp.write('hello') self.assertEqual(sp.getvalue(), b'hello') self.assertEqual(len(sp), 5) self.assertEqual(sp.tell(), 5) sp = util.BufferedByteStream(b'xyz') self.assertEqual(sp.getvalue(), b'xyz') self.assertEqual(len(sp), 3) self.assertEqual(sp.tell(), 0) sp.write('abc') self.assertEqual(sp.getvalue(), b'abc') self.assertEqual(len(sp), 3) self.assertEqual(sp.tell(), 3) def test_len(self): sp = util.BufferedByteStream() self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) self.assertEqual(sp.tell(), 0) sp.write('xyz') self.assertEqual(len(sp), 3) sp = util.BufferedByteStream('foo') self.assertEqual(len(sp), 3) sp.seek(0, 2) sp.write('xyz') self.assertEqual(len(sp), 6) def test_consume(self): sp = util.BufferedByteStream() self.assertEqual(sp.getvalue(), b'') self.assertEqual(sp.tell(), 0) sp.consume() self.assertEqual(sp.getvalue(), b'') self.assertEqual(sp.tell(), 0) sp = util.BufferedByteStream('foobar') self.assertEqual(sp.getvalue(), b'foobar') self.assertEqual(sp.tell(), 0) sp.seek(3) self.assertEqual(sp.tell(), 3) sp.consume() self.assertEqual(sp.getvalue(), b'bar') self.assertEqual(sp.tell(), 0) # from ticket 451 - http://pyamf.org/ticket/451 sp = util.BufferedByteStream('abcdef') # move the stream pos to the end sp.read() self.assertEqual(len(sp), 6) sp.consume() self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('abcdef') sp.seek(6) sp.consume() self.assertEqual(sp.getvalue(), b'') class DataTypeMixInTestCase(unittest.TestCase): endians = ('>', '<') # big, little def _write_endian(self, obj, func, args, expected): old_endian = obj.endian for x in range(2): obj.truncate() obj.endian = self.endians[x] func(*args) self.assertEqual(obj.getvalue(), expected[x]) obj.endian = old_endian def _read_endian(self, data, func, args, expected): for x in range(2): obj = util.BufferedByteStream(data[x]) obj.endian = self.endians[x] result = getattr(obj, func)(*args) self.assertEqual(result, expected) def test_read_uchar(self): x = util.BufferedByteStream(b'\x00\xff') self.assertEqual(x.read_uchar(), 0) self.assertEqual(x.read_uchar(), 255) def test_write_uchar(self): x = util.BufferedByteStream() x.write_uchar(0) self.assertEqual(x.getvalue(), b'\x00') x.write_uchar(255) self.assertEqual(x.getvalue(), b'\x00\xff') self.assertRaises(OverflowError, x.write_uchar, 256) self.assertRaises(OverflowError, x.write_uchar, -1) self.assertRaises(TypeError, x.write_uchar, 'f') def test_read_char(self): x = util.BufferedByteStream(b'\x00\x7f\xff\x80') self.assertEqual(x.read_char(), 0) self.assertEqual(x.read_char(), 127) self.assertEqual(x.read_char(), -1) self.assertEqual(x.read_char(), -128) def test_write_char(self): x = util.BufferedByteStream() x.write_char(0) x.write_char(-128) x.write_char(127) self.assertEqual(x.getvalue(), b'\x00\x80\x7f') 
self.assertRaises(OverflowError, x.write_char, 128) self.assertRaises(OverflowError, x.write_char, -129) self.assertRaises(TypeError, x.write_char, 'f') def test_write_ushort(self): x = util.BufferedByteStream() self._write_endian(x, x.write_ushort, (0,), (b'\x00\x00', b'\x00\x00')) self._write_endian(x, x.write_ushort, (12345,), (b'09', b'90')) self._write_endian( x, x.write_ushort, (65535,), (b'\xff\xff', b'\xff\xff') ) self.assertRaises(OverflowError, x.write_ushort, 65536) self.assertRaises(OverflowError, x.write_ushort, -1) self.assertRaises(TypeError, x.write_ushort, 'aa') def test_read_ushort(self): self._read_endian([b'\x00\x00', b'\x00\x00'], 'read_ushort', (), 0) self._read_endian(['09', '90'], 'read_ushort', (), 12345) self._read_endian([b'\xff\xff', b'\xff\xff'], 'read_ushort', (), 65535) def test_write_short(self): x = util.BufferedByteStream() self._write_endian( x, x.write_short, (-5673,), (b'\xe9\xd7', b'\xd7\xe9') ) self._write_endian( x, x.write_short, (32767,), (b'\x7f\xff', b'\xff\x7f') ) self.assertRaises(OverflowError, x.write_ushort, 65537) self.assertRaises(OverflowError, x.write_ushort, -1) self.assertRaises(TypeError, x.write_short, '\x00\x00') def test_read_short(self): self._read_endian([b'\xe9\xd7', b'\xd7\xe9'], 'read_short', (), -5673) self._read_endian([b'\x7f\xff', b'\xff\x7f'], 'read_short', (), 32767) def test_write_ulong(self): x = util.BufferedByteStream() self._write_endian( x, x.write_ulong, (0,), (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00') ) self._write_endian( x, x.write_ulong, (16810049,), (b'\x01\x00\x80A', b'A\x80\x00\x01') ) self._write_endian( x, x.write_ulong, (4294967295,), (b'\xff\xff\xff\xff', b'\xff\xff\xff\xff') ) self.assertRaises(OverflowError, x.write_ulong, 4294967296) self.assertRaises(OverflowError, x.write_ulong, -1) self.assertRaises(TypeError, x.write_ulong, '\x00\x00\x00\x00') def test_read_ulong(self): self._read_endian( [b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'], 'read_ulong', (), 0 ) self._read_endian( [b'\x01\x00\x80A', b'A\x80\x00\x01'], 'read_ulong', (), 16810049 ) self._read_endian( [b'\xff\xff\xff\xff', b'\xff\xff\xff\xff'], 'read_ulong', (), 4294967295 ) def test_write_long(self): x = util.BufferedByteStream() self._write_endian( x, x.write_long, (0,), (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00') ) self._write_endian( x, x.write_long, (16810049,), (b'\x01\x00\x80A', b'A\x80\x00\x01') ) self._write_endian( x, x.write_long, (2147483647,), (b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f') ) self._write_endian( x, x.write_long, (-2147483648,), (b'\x80\x00\x00\x00', b'\x00\x00\x00\x80') ) self.assertRaises(OverflowError, x.write_long, 2147483648) self.assertRaises(OverflowError, x.write_long, -2147483649) self.assertRaises(TypeError, x.write_long, '\x00\x00\x00\x00') def test_read_long(self): self._read_endian( [b'\xff\xff\xcf\xc7', b'\xc7\xcf\xff\xff'], 'read_long', (), -12345 ) self._read_endian( [b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'], 'read_long', (), 0 ) self._read_endian( [b'\x01\x00\x80A', b'A\x80\x00\x01'], 'read_long', (), 16810049 ) self._read_endian( [b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f'], 'read_long', (), 2147483647 ) def test_write_u24bit(self): x = util.BufferedByteStream() self._write_endian( x, x.write_24bit_uint, (0,), (b'\x00\x00\x00', b'\x00\x00\x00') ) self._write_endian( x, x.write_24bit_uint, (4292609,), (b'A\x80\x01', b'\x01\x80A') ) self._write_endian( x, x.write_24bit_uint, (16777215,), (b'\xff\xff\xff', b'\xff\xff\xff') ) self.assertRaises(OverflowError, x.write_24bit_uint, 16777216) 
self.assertRaises(OverflowError, x.write_24bit_uint, -1) self.assertRaises(TypeError, x.write_24bit_uint, '\x00\x00\x00') def test_read_u24bit(self): self._read_endian( [b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_uint', (), 0 ) self._read_endian( [b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_uint', (), 128 ) self._read_endian( [b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_uint', (), 8388608 ) self._read_endian( [b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_uint', (), 16777087 ) self._read_endian( [b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_uint', (), 8388607 ) def test_write_24bit(self): x = util.BufferedByteStream() self._write_endian( x, x.write_24bit_int, (0,), (b'\x00\x00\x00', b'\x00\x00\x00') ) self._write_endian( x, x.write_24bit_int, (128,), (b'\x00\x00\x80', b'\x80\x00\x00') ) self._write_endian( x, x.write_24bit_int, (8388607,), (b'\x7f\xff\xff', b'\xff\xff\x7f') ) self._write_endian( x, x.write_24bit_int, (-1,), (b'\xff\xff\xff', b'\xff\xff\xff') ) self._write_endian( x, x.write_24bit_int, (-8388608,), (b'\x80\x00\x00', b'\x00\x00\x80') ) self.assertRaises(OverflowError, x.write_24bit_int, 8388608) self.assertRaises(OverflowError, x.write_24bit_int, -8388609) self.assertRaises(TypeError, x.write_24bit_int, '\x00\x00\x00') def test_read_24bit(self): self._read_endian( [b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_int', (), 0 ) self._read_endian( [b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_int', (), 128 ) self._read_endian( [b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_int', (), -8388608 ) self._read_endian( [b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_int', (), -129 ) self._read_endian( [b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_int', (), 8388607 ) def test_write_float(self): x = util.BufferedByteStream() self._write_endian( x, x.write_float, (0.2,), (b'>L\xcc\xcd', b'\xcd\xccL>') ) self.assertRaises(TypeError, x.write_float, 'foo') def test_read_float(self): self._read_endian( [b'?\x00\x00\x00', b'\x00\x00\x00?'], 'read_float', (), 0.5 ) def test_write_double(self): x = util.BufferedByteStream() self._write_endian( x, x.write_double, (0.2,), (b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?') ) self.assertRaises(TypeError, x.write_double, 'foo') def test_read_double(self): self._read_endian( [b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?'], 'read_double', (), 0.2 ) def test_write_utf8_string(self): x = util.BufferedByteStream() self._write_endian( x, x.write_utf8_string, (u'ᚠᛇᚻ',), [b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2 ) self.assertRaises(TypeError, x.write_utf8_string, 1) self.assertRaises(TypeError, x.write_utf8_string, 1.0) self.assertRaises(TypeError, x.write_utf8_string, object()) x.write_utf8_string('\xff') def test_read_utf8_string(self): self._read_endian( [b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2, 'read_utf8_string', (9,), u'ᚠᛇᚻ' ) def test_nan(self): x = util.BufferedByteStream(b'\xff\xf8\x00\x00\x00\x00\x00\x00') self.assertTrue(isNaN(x.read_double())) x = util.BufferedByteStream(b'\xff\xf0\x00\x00\x00\x00\x00\x00') self.assertTrue(isNegInf(x.read_double())) x = util.BufferedByteStream(b'\x7f\xf0\x00\x00\x00\x00\x00\x00') self.assertTrue(isPosInf(x.read_double())) # now test little endian x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf8\xff') x.endian = '<' self.assertTrue(isNaN(x.read_double())) x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\xff') x.endian = '<' self.assertTrue(isNegInf(x.read_double())) x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\x7f') 
x.endian = '<' self.assertTrue(isPosInf(x.read_double())) def test_write_infinites(self): x = util.BufferedByteStream() self._write_endian(x, x.write_double, (NaN,), ( b'\xff\xf8\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x00\x00\xf8\xff' )) self._write_endian(x, x.write_double, (PosInf,), ( b'\x7f\xf0\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x00\x00\xf0\x7f' )) self._write_endian(x, x.write_double, (NegInf,), ( b'\xff\xf0\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x00\x00\xf0\xff' )) class BufferedByteStreamTestCase(unittest.TestCase): """ Tests for L{BufferedByteStream<util.BufferedByteStream>} """ def test_create(self): x = util.BufferedByteStream() self.assertEqual(x.getvalue(), b'') self.assertEqual(x.tell(), 0) x = util.BufferedByteStream('abc') self.assertEqual(x.getvalue(), b'abc') self.assertEqual(x.tell(), 0) def test_read(self): x = util.BufferedByteStream() self.assertEqual(x.tell(), 0) self.assertEqual(len(x), 0) self.assertRaises(IOError, x.read) self.assertRaises(IOError, x.read, 10) x.write('hello') x.seek(0) self.assertRaises(IOError, x.read, 10) self.assertEqual(x.read(), b'hello') def test_read_negative(self): """ @see: #799 """ x = util.BufferedByteStream() x.write('*' * 6000) x.seek(100) self.assertRaises(IOError, x.read, -345) def test_peek(self): x = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz') self.assertEqual(x.tell(), 0) self.assertEqual(x.peek(), b'a') self.assertEqual(x.peek(5), b'abcde') self.assertEqual(x.peek(-1), b'abcdefghijklmnopqrstuvwxyz') x.seek(10) self.assertEqual(x.peek(50), b'klmnopqrstuvwxyz') def test_eof(self): x = util.BufferedByteStream() self.assertTrue(x.at_eof()) x.write('hello') x.seek(0) self.assertFalse(x.at_eof()) x.seek(0, 2) self.assertTrue(x.at_eof()) def test_remaining(self): x = util.BufferedByteStream('spameggs') self.assertEqual(x.tell(), 0) self.assertEqual(x.remaining(), 8) x.seek(2) self.assertEqual(x.tell(), 2) self.assertEqual(x.remaining(), 6) def test_add(self): a = util.BufferedByteStream('a') b = util.BufferedByteStream('b') c = a + b self.assertTrue(isinstance(c, util.BufferedByteStream)) self.assertEqual(c.getvalue(), b'ab') self.assertEqual(c.tell(), 0) def test_add_pos(self): a = util.BufferedByteStream(b'abc') b = util.BufferedByteStream(b'def') a.seek(1) b.seek(0, 2) self.assertEqual(a.tell(), 1) self.assertEqual(b.tell(), 3) self.assertEqual(a.tell(), 1) self.assertEqual(b.tell(), 3) def test_append_types(self): # test non string types a = util.BufferedByteStream() self.assertRaises(TypeError, a.append, 234234) self.assertRaises(TypeError, a.append, 234.0) self.assertRaises(TypeError, a.append, 234234) self.assertRaises(TypeError, a.append, []) self.assertRaises(TypeError, a.append, {}) self.assertRaises(TypeError, a.append, lambda _: None) self.assertRaises(TypeError, a.append, ()) self.assertRaises(TypeError, a.append, object()) def test_append_string(self): """ Test L{util.BufferedByteStream.append} with C{str} objects. 
""" # test empty a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append('foo') self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 3) # test pointer beginning, some data a = util.BufferedByteStream('bar') self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer middle, some data a = util.BufferedByteStream('bar') a.seek(2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 2) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer end, some data a = util.BufferedByteStream('bar') a.seek(0, 2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 3) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved self.assertEqual(len(a), 6) class Foo(object): def getvalue(self): return b'foo' def __str__(self): raise AttributeError() a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append(Foo()) self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) def test_append_unicode(self): """ Test L{util.BufferedByteStream.append} with C{unicode} objects. """ # test empty a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append('foo') self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 3) # test pointer beginning, some data a = util.BufferedByteStream('bar') self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer middle, some data a = util.BufferedByteStream('bar') a.seek(2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 2) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer end, some data a = util.BufferedByteStream('bar') a.seek(0, 2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 3) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved self.assertEqual(len(a), 6) class Foo(object): def getvalue(self): return u'foo' def __str__(self): raise AttributeError() a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append(Foo()) self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) class DummyAlias(pyamf.ClassAlias): pass class AnotherDummyAlias(pyamf.ClassAlias): pass class YADummyAlias(pyamf.ClassAlias): pass class ClassAliasTestCase(unittest.TestCase): def setUp(self): self.old_aliases = pyamf.ALIAS_TYPES.copy() def tearDown(self): replace_dict(self.old_aliases, pyamf.ALIAS_TYPES) def test_simple(self): 
class A(object): pass pyamf.register_alias_type(DummyAlias, A) self.assertEqual(util.get_class_alias(A), DummyAlias) def test_nested(self): class A(object): pass class B(object): pass class C(object): pass pyamf.register_alias_type(DummyAlias, A, B, C) self.assertEqual(util.get_class_alias(B), DummyAlias) def test_multiple(self): class A(object): pass class B(object): pass class C(object): pass pyamf.register_alias_type(DummyAlias, A) pyamf.register_alias_type(AnotherDummyAlias, B) pyamf.register_alias_type(YADummyAlias, C) self.assertEqual(util.get_class_alias(B), AnotherDummyAlias) self.assertEqual(util.get_class_alias(C), YADummyAlias) self.assertEqual(util.get_class_alias(A), DummyAlias) def test_none_existant(self): self.assertEqual(util.get_class_alias(self.__class__), None) def test_subclass(self): class A(object): pass class B(A): pass pyamf.register_alias_type(DummyAlias, A) self.assertEqual(util.get_class_alias(B), DummyAlias) class IsClassSealedTestCase(unittest.TestCase): """ Tests for L{util.is_class_sealed} """ def test_new_mixed(self): class A(object): __slots__ = ['foo', 'bar'] class B(A): pass class C(B): __slots__ = ('spam', 'eggs') self.assertTrue(util.is_class_sealed(A)) self.assertFalse(util.is_class_sealed(B)) self.assertFalse(util.is_class_sealed(C)) def test_deep(self): class A(object): __slots__ = ['foo', 'bar'] class B(A): __slots__ = ('gak',) class C(B): pass self.assertTrue(util.is_class_sealed(A)) self.assertTrue(util.is_class_sealed(B)) self.assertFalse(util.is_class_sealed(C)) class GetClassMetaTestCase(unittest.TestCase): """ Tests for L{util.get_class_meta} """ def test_types(self): class A: pass class B(object): pass for t in ['', u'', 1, 1.0, 1, [], {}, object, object(), A(), B()]: self.assertRaises(TypeError, util.get_class_meta, t) def test_no_meta(self): class A: pass class B(object): pass empty = { 'readonly_attrs': None, 'static_attrs': None, 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'exclude_attrs': None, 'proxy_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), empty) self.assertEqual(util.get_class_meta(B), empty) def test_alias(self): class A: class __amf__: alias = 'foo.bar.Spam' class B(object): class __amf__: alias = 'foo.bar.Spam' meta = { 'readonly_attrs': None, 'static_attrs': None, 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': 'foo.bar.Spam', 'amf3': None, 'proxy_attrs': None, 'exclude_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_static(self): class A: class __amf__: static = ['foo', 'bar'] class B(object): class __amf__: static = ['foo', 'bar'] meta = { 'readonly_attrs': None, 'static_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'exclude_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_exclude(self): class A: class __amf__: exclude = ['foo', 'bar'] class B(object): class __amf__: exclude = ['foo', 'bar'] meta = { 'readonly_attrs': None, 'exclude_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'proxy_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_readonly(self): class A: class __amf__: readonly = ['foo', 'bar'] class B(object): 
class __amf__: readonly = ['foo', 'bar'] meta = { 'exclude_attrs': None, 'readonly_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None, 'proxy_attrs': None, } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_amf3(self): class A: class __amf__: amf3 = True class B(object): class __amf__: amf3 = True meta = { 'exclude_attrs': None, 'proxy_attrs': None, 'synonym_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': True, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_dynamic(self): class A: class __amf__: dynamic = False class B(object): class __amf__: dynamic = False meta = { 'exclude_attrs': None, 'proxy_attrs': None, 'synonym_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'dynamic': False, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_external(self): class A: class __amf__: external = True class B(object): class __amf__: external = True meta = { 'exclude_attrs': None, 'proxy_attrs': None, 'synonym_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': True } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_dict(self): meta = { 'exclude': ['foo'], 'readonly': ['bar'], 'dynamic': False, 'alias': 'spam.eggs', 'proxy_attrs': None, 'synonym_attrs': None, 'amf3': True, 'static': ['baz'], 'external': True } class A: __amf__ = meta class B(object): __amf__ = meta ret = { 'readonly_attrs': ['bar'], 'static_attrs': ['baz'], 'proxy_attrs': None, 'dynamic': False, 'alias': 'spam.eggs', 'amf3': True, 'exclude_attrs': ['foo'], 'synonym_attrs': None, 'proxy_attrs': None, 'external': True } self.assertEqual(util.get_class_meta(A), ret) self.assertEqual(util.get_class_meta(B), ret) def test_proxy(self): class A: class __amf__: proxy = ['foo', 'bar'] class B(object): class __amf__: proxy = ['foo', 'bar'] meta = { 'exclude_attrs': None, 'readonly_attrs': None, 'proxy_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_synonym(self): class A: class __amf__: synonym = {'foo': 'bar'} class B(object): class __amf__: synonym = {'foo': 'bar'} meta = { 'exclude_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'synonym_attrs': {'foo': 'bar'}, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta)
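The DataTypeMixIn tests above exercise both byte orders; for instance write_ushort(12345) is expected to produce b'09' in big-endian mode and b'90' in little-endian mode. A standalone sketch of that same expectation using only the standard-library struct module (no pyamf required):

import struct

# 12345 == 0x3039; the two bytes are 0x30 ('0') and 0x39 ('9').
assert struct.pack('>H', 12345) == b'09'   # big-endian unsigned short
assert struct.pack('<H', 12345) == b'90'   # little-endian unsigned short

# Reading works the same way in reverse.
assert struct.unpack('>H', b'09')[0] == 12345
assert struct.unpack('<H', b'90')[0] == 12345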
27.461132
81
0.542516
4,237
36,386
4.513335
0.068445
0.167076
0.038592
0.028866
0.76808
0.688072
0.59886
0.539612
0.495163
0.478795
0
0.051481
0.3124
36,386
1,324
82
27.481873
0.712858
0.027098
0
0.581706
0
0
0.113514
0.024349
0
0
0
0
0.279548
1
0.080164
false
0.018499
0.006167
0.005139
0.163412
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0a71715157a2be752f2c46cd1b41f44aab6ece59
3,087
py
Python
e-valuator.py
keocol/e-valuator
c2bab22e3debf08263fef57ee4135312a2bb2b0d
[ "MIT" ]
null
null
null
e-valuator.py
keocol/e-valuator
c2bab22e3debf08263fef57ee4135312a2bb2b0d
[ "MIT" ]
null
null
null
e-valuator.py
keocol/e-valuator
c2bab22e3debf08263fef57ee4135312a2bb2b0d
[ "MIT" ]
null
null
null
import dns.resolver import sys import colorama import platform from colorama import init, Fore, Back, Style import re # pip install -r requirements.txt (colorama) os = platform.platform() if os.find('Windows')!= (-1): init(convert=True) print(""" ███████╗░░░░░░██╗░░░██╗░█████╗░██╗░░░░░██╗░░░██╗░█████╗░████████╗░█████╗░██████╗░ ██╔════╝░░░░░░██║░░░██║██╔══██╗██║░░░░░██║░░░██║██╔══██╗╚══██╔══╝██╔══██╗██╔══██╗ █████╗░░█████╗╚██╗░██╔╝███████║██║░░░░░██║░░░██║███████║░░░██║░░░██║░░██║██████╔╝ ██╔══╝░░╚════╝░╚████╔╝░██╔══██║██║░░░░░██║░░░██║██╔══██║░░░██║░░░██║░░██║██╔══██╗ ███████╗░░░░░░░░╚██╔╝░░██║░░██║███████╗╚██████╔╝██║░░██║░░░██║░░░╚█████╔╝██║░░██║ ╚══════╝░░░░░░░░░╚═╝░░░╚═╝░░╚═╝╚══════╝░╚═════╝░╚═╝░░╚═╝░░░╚═╝░░░░╚════╝░╚═╝░░╚═╝ \x1B[3mSimple Python3 Script for Checking SPF & DMARC Records.\x1B[0m """ + '\n') Domain = input('Domain: ') # Checking SPF print ('\n[+] Checking SPF Record...') try: obj_answer = dns.resolver.resolve(Domain, 'TXT') except: sys.exit(Fore.RED + "\n[+] Domain can't be resolved! Check the domain name and try again..") answer = str(obj_answer.response) cond = answer.find("v=spf") if cond != -1: print ('[+] SPF Record Found!') spf_pos= answer.find("v=spf") spf_end_tmp= (answer[spf_pos:].find("\n"))-1 spf_end= answer[spf_pos:spf_pos+spf_end_tmp] print (Fore.GREEN + '[+] Domain: ' + Domain) print (Fore.GREEN + '[+] SPF Record: ' +spf_end) neutral_check = answer.find('?all') fail_check = answer.find('-all') soft_check = answer.find('~all') pass_check = answer.find('+all') if neutral_check != -1: print (Fore.RED +'[+] Result: ?all IS FOUND!! Domain emails can be spoofed!') elif fail_check != -1: print (Fore.GREEN +'[+] Result: -all is found. SPF is correctly configured.') elif soft_check != -1: print (Fore.GREEN +'[+] Result: ~all is found. SPF is correctly configured.') elif pass_check != -1: print (Fore.RED +'[+] Result: +all DOMAIN IS VERY BADLY CONFIGURED! Domain emails can be spoofed!') else: print (Fore.RED +'[+] Result: No condition is set for "all"! Domain emails can be spoofed!') else: print (Fore.RED +'[+] No SPF Record Found!!') # Checking DMARC print (Fore.WHITE + '\n\n[+] Checking DMARC Policy..') try: obj2_answer = dns.resolver.resolve('_dmarc.'+ Domain, 'TXT') except: sys.exit(Fore.RED + "[+] The domain doesn't have DMARC policy configured!") answer2 = str(obj2_answer.response) print (Fore.WHITE + '[+] DMARC Policy Found!') none_check = re.search("[\;\s]p\=none\;", answer2) reject_check = re.search("[\;\s]p\=reject\;", answer2) quarantine_check = re.search("[\;\s]p\=quarantine\;", answer2) if none_check: print (Fore.RED + '[+] Result: DMARC Policy is set as none! Domain emails can be spoofed!') if reject_check: print (Fore.GREEN + '[+] Result: DMARC Policy is set as reject! Domain emails are safe from spoofing.') if quarantine_check: print (Fore.GREEN + '[+] Result: DMARC Policy is set as quarantine! Domain emails are safe from spoofing.')
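The script above resolves TXT records and scans them for a "v=spf" record and its "all" qualifier. A smaller, function-shaped sketch of the same check, assuming dnspython 2.x is installed (the function name and return values are illustrative, not part of the original script):

import dns.resolver

def spf_qualifier(domain):
    """Return the 'all' qualifier of the domain's SPF record, or None."""
    try:
        answers = dns.resolver.resolve(domain, 'TXT')
    except Exception:
        return None
    for rdata in answers:
        txt = b''.join(rdata.strings).decode('utf-8', 'replace')
        if txt.lower().startswith('v=spf1'):
            for qualifier in ('-all', '~all', '?all', '+all'):
                if qualifier in txt:
                    return qualifier
            return 'no "all" mechanism'
    return None

# Example (result depends on live DNS):
# print(spf_qualifier('example.com'))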
32.15625
108
0.547781
376
3,087
5.712766
0.276596
0.054469
0.039106
0.03352
0.272346
0.22905
0.189013
0.136872
0.136872
0.099628
0
0.006923
0.157758
3,087
96
108
32.15625
0.632308
0.022676
0
0.092308
0
0
0.516615
0.17369
0
0
0
0
0
1
0
false
0.030769
0.092308
0
0.092308
0.246154
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a50e8edb03f4c7852b3cc7809ccd49216f25af1
2,655
py
Python
api/server.py
qh73xe/HowAboutNatume
8d994a1e16e2153dc200097d8f8b43713d76a3d5
[ "MIT" ]
null
null
null
api/server.py
qh73xe/HowAboutNatume
8d994a1e16e2153dc200097d8f8b43713d76a3d5
[ "MIT" ]
7
2020-03-24T15:37:48.000Z
2021-06-01T22:01:22.000Z
api/server.py
qh73xe/HowAboutNatume
8d994a1e16e2153dc200097d8f8b43713d76a3d5
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -* """トルネードを使用した ask.api を作成します.""" from json import dumps from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from tornado.options import parse_command_line from tornado.web import Application, RequestHandler from tornado.options import define, options from tokenizer import get_entity from logger import getLogger LOGGER = getLogger('API_MODULE') define("port", default=8000, help="run on the given port", type=int) class AskHandler(RequestHandler): """question に get された文章と親密度の高い語を返します.""" def get(self): """Question に答えます.""" from ask import ask author = self.get_argument('author') question = self.get_argument('question') answers = { 'answers': ask(author, get_entity(question)) } self.finish( dumps( answers, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': ') ) ) def post(self): """Action on google の web フック用レスポンス""" from ask import ask import json data = json.loads(self.request.body) LOGGER.info('input: {data}'.format(data=data)) author = data.get('author', '夏目漱石') question = data.get('question') answers = ask(author, get_entity(question)) if answers: adjective = answers.get('adjective', None) nouns = answers.get('nouns') if adjective: speech = '。'.join([ 'それは {adjective} 質問ですね'.format(adjective=adjective[0]), 'きっと, {0} や {1} あるいは {2} のことです'.format(*nouns) ]) else: speech = 'それはきっと, {0} や {1} あるいは {2} のことです'.format(*nouns) else: speech = '。'.join([ '{q} についてですか'.format(q=question), '難しいことを聞きますね', '私にはわからないです' ]) displayText = speech respose = { 'speech': speech, 'displayText': displayText, 'data': answers, 'contextOut': [answers], 'source': 'how-about-natume' } self.finish( dumps( respose, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': ') ) ) if __name__ == "__main__": parse_command_line() app = Application(handlers=[(r"/", AskHandler)]) http_server = HTTPServer(app) http_server.listen(options.port) IOLoop.instance().start()
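The AskHandler above answers GET requests carrying 'author' and 'question' query parameters and returns a JSON document with an 'answers' key. A minimal client-side sketch using only the standard library, assuming the server from this file is running locally on its default port 8000 (the parameter values are placeholders):

import json
from urllib.parse import urlencode
from urllib.request import urlopen

params = urlencode({'author': '夏目漱石', 'question': '猫とは何ですか'})
with urlopen('http://localhost:8000/?' + params) as resp:
    answers = json.loads(resp.read().decode('utf-8'))

print(answers.get('answers'))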
27.947368
75
0.529567
257
2,655
5.377432
0.428016
0.039797
0.026049
0.034732
0.162084
0.162084
0.114327
0.114327
0.114327
0.049204
0
0.008097
0.348776
2,655
94
76
28.244681
0.791209
0.049718
0
0.27027
0
0
0.113909
0
0
0
0
0
0
1
0.027027
false
0
0.148649
0
0.189189
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5258097a7cb4af2ef28cde1153d8db7884fd80
3,012
py
Python
proxy/http/chunk_parser.py
GDGSNF/proxy.py
3ee2824217286df3c108beadf3185eee35c28b49
[ "BSD-3-Clause" ]
null
null
null
proxy/http/chunk_parser.py
GDGSNF/proxy.py
3ee2824217286df3c108beadf3185eee35c28b49
[ "BSD-3-Clause" ]
null
null
null
proxy/http/chunk_parser.py
GDGSNF/proxy.py
3ee2824217286df3c108beadf3185eee35c28b49
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ proxy.py ~~~~~~~~ ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on Network monitoring, controls & Application development, testing, debugging. :copyright: (c) 2013-present by Abhinav Singh and contributors. :license: BSD, see LICENSE for more details. """ from typing import NamedTuple, Tuple, List, Optional from ..common.utils import bytes_, find_http_line from ..common.constants import CRLF, DEFAULT_BUFFER_SIZE ChunkParserStates = NamedTuple( 'ChunkParserStates', [ ('WAITING_FOR_SIZE', int), ('WAITING_FOR_DATA', int), ('COMPLETE', int), ], ) chunkParserStates = ChunkParserStates(1, 2, 3) class ChunkParser: """HTTP chunked encoding response parser.""" def __init__(self) -> None: self.state = chunkParserStates.WAITING_FOR_SIZE self.body: bytes = b'' # Parsed chunks self.chunk: bytes = b'' # Partial chunk received # Expected size of next following chunk self.size: Optional[int] = None def parse(self, raw: bytes) -> bytes: more = len(raw) > 0 while more and self.state != chunkParserStates.COMPLETE: more, raw = self.process(raw) return raw def process(self, raw: bytes) -> Tuple[bool, bytes]: if self.state == chunkParserStates.WAITING_FOR_SIZE: # Consume prior chunk in buffer # in case chunk size without CRLF was received raw = self.chunk + raw self.chunk = b'' # Extract following chunk data size line, raw = find_http_line(raw) # CRLF not received or Blank line was received. if line is None or line.strip() == b'': self.chunk = raw raw = b'' else: self.size = int(line, 16) self.state = chunkParserStates.WAITING_FOR_DATA elif self.state == chunkParserStates.WAITING_FOR_DATA: assert self.size is not None remaining = self.size - len(self.chunk) self.chunk += raw[:remaining] raw = raw[remaining:] if len(self.chunk) == self.size: raw = raw[len(CRLF):] self.body += self.chunk if self.size == 0: self.state = chunkParserStates.COMPLETE else: self.state = chunkParserStates.WAITING_FOR_SIZE self.chunk = b'' self.size = None return len(raw) > 0, raw @staticmethod def to_chunks(raw: bytes, chunk_size: int = DEFAULT_BUFFER_SIZE) -> bytes: chunks: List[bytes] = [] for i in range(0, len(raw), chunk_size): chunk = raw[i: i + chunk_size] chunks.append(bytes_('{:x}'.format(len(chunk)))) chunks.append(chunk) chunks.append(bytes_('{:x}'.format(0))) chunks.append(b'') return CRLF.join(chunks) + CRLF
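ChunkParser above implements HTTP chunked transfer encoding: each chunk is a hexadecimal length line followed by CRLF, the chunk bytes, another CRLF, and a final zero-length chunk terminates the body. A self-contained sketch of that wire format (it does not use the proxy.py classes), showing an encode/decode round trip of a small payload:

CRLF = b'\r\n'

def encode_chunked(raw, chunk_size=4):
    """Split raw bytes into chunks: hex length, CRLF, data, CRLF, then a 0 chunk."""
    out = b''
    for i in range(0, len(raw), chunk_size):
        chunk = raw[i:i + chunk_size]
        out += b'%x' % len(chunk) + CRLF + chunk + CRLF
    return out + b'0' + CRLF + CRLF

def decode_chunked(data):
    """Reassemble the body from a chunked-encoded byte string."""
    body = b''
    while True:
        size_line, _, data = data.partition(CRLF)
        size = int(size_line, 16)
        if size == 0:
            return body
        body += data[:size]
        data = data[size + len(CRLF):]

payload = b'hello chunked world'
assert decode_chunked(encode_chunked(payload)) == payload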
35.857143
86
0.581009
345
3,012
4.985507
0.344928
0.047093
0.105814
0.09593
0.148837
0.12093
0.051163
0
0
0
0
0.007232
0.311421
3,012
83
87
36.289157
0.820636
0.191899
0
0.103448
0
0
0.027197
0
0
0
0
0
0.017241
1
0.068966
false
0
0.051724
0
0.189655
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a53c43c787fb87b95985049d6273d36fc7dbdab
31,240
py
Python
nova/pci/stats.py
10088/nova
972c06c608f0b00e9066d7f581fd81197065cf49
[ "Apache-2.0" ]
null
null
null
nova/pci/stats.py
10088/nova
972c06c608f0b00e9066d7f581fd81197065cf49
[ "Apache-2.0" ]
null
null
null
nova/pci/stats.py
10088/nova
972c06c608f0b00e9066d7f581fd81197065cf49
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import typing as ty from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from nova import exception from nova import objects from nova.objects import fields from nova.objects import pci_device_pool from nova.pci.request import PCI_REMOTE_MANAGED_TAG from nova.pci import utils from nova.pci import whitelist CONF = cfg.CONF LOG = logging.getLogger(__name__) # TODO(stephenfin): We might want to use TypedDict here. Refer to # https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for # more information. Pool = ty.Dict[str, ty.Any] class PciDeviceStats(object): """PCI devices summary information. According to the PCI SR-IOV spec, a PCI physical function can have up to 256 PCI virtual functions, thus the number of assignable PCI functions in a cloud can be big. The scheduler needs to know all device availability information in order to determine which compute hosts can support a PCI request. Passing individual virtual device information to the scheduler does not scale, so we provide summary information. Usually the virtual functions provided by a host PCI device have the same value for most properties, like vendor_id, product_id and class type. The PCI stats class summarizes this information for the scheduler. The pci stats information is maintained exclusively by compute node resource tracker and updated to database. The scheduler fetches the information and selects the compute node accordingly. If a compute node is selected, the resource tracker allocates the devices to the instance and updates the pci stats information. This summary information will be helpful for cloud management also. """ pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type'] def __init__( self, numa_topology: 'objects.NUMATopology', stats: 'objects.PCIDevicePoolList' = None, dev_filter: whitelist.Whitelist = None, ) -> None: self.numa_topology = numa_topology self.pools = ( [pci_pool.to_dict() for pci_pool in stats] if stats else [] ) self.pools.sort(key=lambda item: len(item)) self.dev_filter = dev_filter or whitelist.Whitelist( CONF.pci.passthrough_whitelist) def _equal_properties( self, dev: Pool, entry: Pool, matching_keys: ty.List[str], ) -> bool: return all(dev.get(prop) == entry.get(prop) for prop in matching_keys) def _find_pool(self, dev_pool: Pool) -> ty.Optional[Pool]: """Return the first pool that matches dev.""" for pool in self.pools: pool_keys = pool.copy() del pool_keys['count'] del pool_keys['devices'] if (len(pool_keys.keys()) == len(dev_pool.keys()) and self._equal_properties(dev_pool, pool_keys, list(dev_pool))): return pool return None @staticmethod def _ensure_remote_managed_tag( dev: 'objects.PciDevice', pool: Pool): """Add a remote_managed tag depending on a device type if needed. Network devices may be managed remotely, e.g. by a SmartNIC DPU. 
If a tag has not been explicitly provided, populate it by assuming that a device is not remote managed by default. """ if dev.dev_type not in (fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.SRIOV_PF, fields.PciDeviceType.VDPA): return # A tag is added here rather than at the client side to avoid an # issue with having objects without this tag specified during an # upgrade to the first version that supports handling this tag. if pool.get(PCI_REMOTE_MANAGED_TAG) is None: # NOTE: tags are compared as strings case-insensitively, see # pci_device_prop_match in nova/pci/utils.py. pool[PCI_REMOTE_MANAGED_TAG] = 'false' def _create_pool_keys_from_dev( self, dev: 'objects.PciDevice', ) -> ty.Optional[Pool]: """Create a stats pool dict that this dev is supposed to be part of Note that this pool dict contains the stats pool's keys and their values. 'count' and 'devices' are not included. """ # Don't add a device that doesn't have a matching device spec. # This can happen during initial sync up with the controller devspec = self.dev_filter.get_devspec(dev) if not devspec: return None tags = devspec.get_tags() pool = {k: getattr(dev, k) for k in self.pool_keys} if tags: pool.update(tags) # NOTE(gibi): parent_ifname acts like a tag during pci claim but # not provided as part of the whitelist spec as it is auto detected # by the virt driver. # This key is used for match InstancePciRequest backed by neutron ports # that has resource_request and therefore that has resource allocation # already in placement. if dev.extra_info.get('parent_ifname'): pool['parent_ifname'] = dev.extra_info['parent_ifname'] self._ensure_remote_managed_tag(dev, pool) return pool def _get_pool_with_device_type_mismatch( self, dev: 'objects.PciDevice', ) -> ty.Optional[ty.Tuple[Pool, 'objects.PciDevice']]: """Check for device type mismatch in the pools for a given device. Return (pool, device) if device type does not match or a single None if the device type matches. """ for pool in self.pools: for device in pool['devices']: if device.address == dev.address: if dev.dev_type != pool["dev_type"]: return pool, device return None return None def update_device(self, dev: 'objects.PciDevice') -> None: """Update a device to its matching pool.""" pool_device_info = self._get_pool_with_device_type_mismatch(dev) if pool_device_info is None: return None pool, device = pool_device_info pool['devices'].remove(device) self._decrease_pool_count(self.pools, pool) self.add_device(dev) def add_device(self, dev: 'objects.PciDevice') -> None: """Add a device to its matching pool.""" dev_pool = self._create_pool_keys_from_dev(dev) if dev_pool: pool = self._find_pool(dev_pool) if not pool: dev_pool['count'] = 0 dev_pool['devices'] = [] self.pools.append(dev_pool) self.pools.sort(key=lambda item: len(item)) pool = dev_pool pool['count'] += 1 pool['devices'].append(dev) @staticmethod def _decrease_pool_count( pool_list: ty.List[Pool], pool: Pool, count: int = 1, ) -> int: """Decrement pool's size by count. If pool becomes empty, remove pool from pool_list. 
""" if pool['count'] > count: pool['count'] -= count count = 0 else: count -= pool['count'] pool_list.remove(pool) return count def remove_device(self, dev: 'objects.PciDevice') -> None: """Remove one device from the first pool that it matches.""" dev_pool = self._create_pool_keys_from_dev(dev) if dev_pool: pool = self._find_pool(dev_pool) if not pool: raise exception.PciDevicePoolEmpty( compute_node_id=dev.compute_node_id, address=dev.address) pool['devices'].remove(dev) self._decrease_pool_count(self.pools, pool) def get_free_devs(self) -> ty.List['objects.PciDevice']: free_devs: ty.List[objects.PciDevice] = [] for pool in self.pools: free_devs.extend(pool['devices']) return free_devs def consume_requests( self, pci_requests: 'objects.InstancePCIRequests', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> ty.Optional[ty.List['objects.PciDevice']]: alloc_devices: ty.List[objects.PciDevice] = [] for request in pci_requests: count = request.count pools = self._filter_pools(self.pools, request, numa_cells) # Failed to allocate the required number of devices. Return the # devices already allocated during previous iterations back to # their pools if not pools: LOG.error("Failed to allocate PCI devices for instance. " "Unassigning devices back to pools. " "This should not happen, since the scheduler " "should have accurate information, and allocation " "during claims is controlled via a hold " "on the compute node semaphore.") for d in range(len(alloc_devices)): self.add_device(alloc_devices.pop()) return None for pool in pools: if pool['count'] >= count: num_alloc = count else: num_alloc = pool['count'] count -= num_alloc pool['count'] -= num_alloc for d in range(num_alloc): pci_dev = pool['devices'].pop() self._handle_device_dependents(pci_dev) pci_dev.request_id = request.request_id alloc_devices.append(pci_dev) if count == 0: break return alloc_devices def _handle_device_dependents(self, pci_dev: 'objects.PciDevice') -> None: """Remove device dependents or a parent from pools. In case the device is a PF, all of it's dependent VFs should be removed from pools count, if these are present. When the device is a VF, or a VDPA device, it's parent PF pool count should be decreased, unless it is no longer in a pool. """ if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF: vfs_list = pci_dev.child_devices if vfs_list: free_devs = self.get_free_devs() for vf in vfs_list: # NOTE(gibi): do not try to remove a device that are # already removed if vf in free_devs: self.remove_device(vf) elif pci_dev.dev_type in ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA, ): try: parent = pci_dev.parent_device # Make sure not to decrease PF pool count if this parent has # been already removed from pools if parent in self.get_free_devs(): self.remove_device(parent) except exception.PciDeviceNotFound: return def _filter_pools_for_spec( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools that don't match the request's device spec. Exclude pools that do not match the specified ``vendor_id``, ``product_id`` and/or ``device_type`` field, or any of the other arbitrary tags such as ``physical_network``, specified in the request. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. 
""" request_specs = request.spec return [ pool for pool in pools if utils.pci_device_prop_match(pool, request_specs) ] def _filter_pools_for_numa_cells( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']], ) -> ty.List[Pool]: """Filter out pools with the wrong NUMA affinity, if required. Exclude pools that do not have *suitable* PCI NUMA affinity. ``numa_policy`` determines what *suitable* means, being one of PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED (must-have). We iterate through the various policies in order of strictness. This means that even if we only *prefer* PCI-NUMA affinity, we will still attempt to provide it if possible. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells. :returns: A list of pools that can, together, provide at least ``requested_count`` PCI devices with the level of NUMA affinity required by ``numa_policy``, else all pools that can satisfy this policy even if it's not enough. """ if not numa_cells: return pools # we default to the 'legacy' policy for...of course...legacy reasons requested_policy = fields.PCINUMAAffinityPolicy.LEGACY if 'numa_policy' in request: requested_policy = request.numa_policy or requested_policy requested_count = request.count numa_cell_ids = [cell.id for cell in numa_cells] # filter out pools which numa_node is not included in numa_cell_ids filtered_pools = [ pool for pool in pools if any(utils.pci_device_prop_match( pool, [{'numa_node': cell}]) for cell in numa_cell_ids)] # we can't apply a less strict policy than the one requested, so we # need to return if we've demanded a NUMA affinity of REQUIRED. # However, NUMA affinity is a good thing. If we can get enough devices # with the stricter policy then we will use them. if requested_policy == fields.PCINUMAAffinityPolicy.REQUIRED or sum( pool['count'] for pool in filtered_pools) >= requested_count: return filtered_pools # the SOCKET policy is a bit of a special case. It's less strict than # REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least # with our assumption of never having multiple sockets per NUMA node), # but not always more strict than LEGACY: a PCI device with no NUMA # affinity will fulfil LEGACY but not SOCKET. If we have SOCKET, # process it here and don't continue. if requested_policy == fields.PCINUMAAffinityPolicy.SOCKET: return self._filter_pools_for_socket_affinity(pools, numa_cells) # some systems don't report NUMA node info for PCI devices, in which # case None is reported in 'pci_device.numa_node'. The LEGACY policy # allows us to use these devices so we include None in the list of # suitable NUMA cells. numa_cell_ids.append(None) # filter out pools which numa_node is not included in numa_cell_ids filtered_pools = [ pool for pool in pools if any(utils.pci_device_prop_match( pool, [{'numa_node': cell}]) for cell in numa_cell_ids)] # once again, we can't apply a less strict policy than the one # requested, so we need to return if we've demanded a NUMA affinity of # LEGACY. Similarly, we will also return if we have enough devices to # satisfy this somewhat strict policy. 
if requested_policy == fields.PCINUMAAffinityPolicy.LEGACY or sum( pool['count'] for pool in filtered_pools) >= requested_count: return filtered_pools # if we've got here, we're using the PREFERRED policy and weren't able # to provide anything with stricter affinity. Use whatever devices you # can, folks. return sorted( pools, key=lambda pool: pool.get('numa_node') not in numa_cell_ids) def _filter_pools_for_socket_affinity( self, pools: ty.List[Pool], numa_cells: ty.List['objects.InstanceNUMACell'], ) -> ty.List[Pool]: host_cells = self.numa_topology.cells # bail early if we don't have socket information for all host_cells. # This could happen if we're running on an weird older system with # multiple sockets per NUMA node, which is a configuration that we # explicitly chose not to support. if any(cell.socket is None for cell in host_cells): LOG.debug('No socket information in host NUMA cell(s).') return [] # get a set of host sockets that the guest cells are in. Since guest # cell IDs map to host cell IDs, we can just lookup the latter's # socket. socket_ids = set() for guest_cell in numa_cells: for host_cell in host_cells: if guest_cell.id == host_cell.id: socket_ids.add(host_cell.socket) # now get a set of host NUMA nodes that are in the above sockets allowed_numa_nodes = set() for host_cell in host_cells: if host_cell.socket in socket_ids: allowed_numa_nodes.add(host_cell.id) # filter out pools that are not in one of the correct host NUMA nodes. return [ pool for pool in pools if any( utils.pci_device_prop_match(pool, [{'numa_node': numa_node}]) for numa_node in allowed_numa_nodes ) ] def _filter_pools_for_unrequested_pfs( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with PFs, unless these are required. This is necessary in cases where PFs and VFs have the same product_id and generally useful elsewhere. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all( spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF for spec in request.spec ): pools = [ pool for pool in pools if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF ] return pools def _filter_pools_for_unrequested_vdpa_devices( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with VDPA devices, unless these are required. This is necessary as vdpa devices require special handling and should not be allocated to generic pci device requests. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all( spec.get('dev_type') != fields.PciDeviceType.VDPA for spec in request.spec ): pools = [ pool for pool in pools if not pool.get('dev_type') == fields.PciDeviceType.VDPA ] return pools def _filter_pools_for_unrequested_remote_managed_devices( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with remote_managed devices, unless requested. Remote-managed devices are not usable for legacy SR-IOV or hardware offload scenarios and must be excluded from allocation. 
:param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG)) for spec in request.spec): pools = [pool for pool in pools if not strutils.bool_from_string( pool.get(PCI_REMOTE_MANAGED_TAG))] return pools def _filter_pools( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']], ) -> ty.Optional[ty.List[Pool]]: """Determine if an individual PCI request can be met. Filter pools, which are collections of devices with similar traits, to identify those that can support the provided PCI request. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``request.numa_policy``. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACell objects. :returns: A list of pools that can be used to support the request if this is possible, else None. """ # NOTE(vladikr): This code may be open to race conditions. # Two concurrent requests may succeed when called support_requests # because this method does not remove related devices from the pools # Firstly, let's exclude all devices that don't match our spec (e.g. # they've got different PCI IDs or something) before_count = sum([pool['count'] for pool in pools]) pools = self._filter_pools_for_spec(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) due to mismatched PCI attribute(s)', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # Next, let's exclude all devices that aren't on the correct NUMA node # or socket, *assuming* we have devices and care about that, as # determined by policy before_count = after_count pools = self._filter_pools_for_numa_cells(pools, request, numa_cells) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are on the wrong NUMA node(s)', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # If we're not requesting PFs then we should not use these. # Exclude them. before_count = after_count pools = self._filter_pools_for_unrequested_pfs(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are PFs which we have not ' 'requested', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # If we're not requesting VDPA devices then we should not use these # either. Exclude them. before_count = after_count pools = self._filter_pools_for_unrequested_vdpa_devices(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are VDPA devices which we have ' 'not requested', before_count - after_count ) # If we're not requesting remote_managed devices then we should not # use these either. 
Exclude them. before_count = after_count pools = self._filter_pools_for_unrequested_remote_managed_devices( pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are remote-managed devices which' 'we have not requested', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None return pools def support_requests( self, requests: ty.List['objects.InstancePCIRequest'], numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> bool: """Determine if the PCI requests can be met. Determine, based on a compute node's PCI stats, if an instance can be scheduled on the node. **Support does not mean real allocation**. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``numa_policy``. :param requests: A list of InstancePCIRequest object describing the types, quantities and required NUMA affinities of devices we want. :type requests: nova.objects.InstancePCIRequests :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells, or None. :returns: Whether this compute node can satisfy the given request. """ # NOTE(yjiang5): this function has high possibility to fail, # so no exception should be triggered for performance reason. return all( self._filter_pools(self.pools, r, numa_cells) for r in requests ) def _apply_request( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> bool: """Apply an individual PCI request. Apply a PCI request against a given set of PCI device pools, which are collections of devices with similar traits. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``request.numa_policy``. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACell objects. :returns: True if the request was applied against the provided pools successfully, else False. """ # NOTE(vladikr): This code maybe open to race conditions. # Two concurrent requests may succeed when called support_requests # because this method does not remove related devices from the pools filtered_pools = self._filter_pools(pools, request, numa_cells) if not filtered_pools: return False count = request.count for pool in filtered_pools: count = self._decrease_pool_count(pools, pool, count) if not count: break return True def apply_requests( self, requests: ty.List['objects.InstancePCIRequest'], numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> None: """Apply PCI requests to the PCI stats. This is used in multiple instance creation, when the scheduler has to maintain how the resources are consumed by the instances. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``numa_policy``. :param requests: A list of InstancePCIRequest object describing the types, quantities and required NUMA affinities of devices we want. :type requests: nova.objects.InstancePCIRequests :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells, or None. 
:raises: exception.PciDeviceRequestFailed if this compute node cannot satisfy the given request. """ if not all( self._apply_request(self.pools, r, numa_cells) for r in requests ): raise exception.PciDeviceRequestFailed(requests=requests) def __iter__(self) -> ty.Iterator[Pool]: pools: ty.List[Pool] = [] for pool in self.pools: pool = copy.deepcopy(pool) # 'devices' shouldn't be part of stats if 'devices' in pool: del pool['devices'] pools.append(pool) return iter(pools) def clear(self) -> None: """Clear all the stats maintained.""" self.pools = [] def __eq__(self, other: object) -> bool: if not isinstance(other, PciDeviceStats): return NotImplemented return self.pools == other.pools def to_device_pools_obj(self) -> 'objects.PciDevicePoolList': """Return the contents of the pools as a PciDevicePoolList object.""" stats = [x for x in self] return pci_device_pool.from_pci_stats(stats) def has_remote_managed_device_pools(self) -> bool: """Determine whether remote managed device pools are present on a host. The check is pool-based, not free device-based and is NUMA cell agnostic. """ dummy_req = objects.InstancePCIRequest( count=0, spec=[{'remote_managed': True}] ) pools = self._filter_pools_for_spec(self.pools, dummy_req) return bool(pools)
41.708945
79
0.628073
4,075
31,240
4.694969
0.146012
0.009722
0.009879
0.010245
0.441721
0.391177
0.358562
0.331121
0.326051
0.312356
0
0.001008
0.301536
31,240
748
80
41.764706
0.875802
0.409027
0
0.369231
0
0
0.104684
0.027765
0
0
0
0.001337
0
1
0.071795
false
0.002564
0.030769
0.002564
0.207692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a55f8c89efdf9367ae5e51c6555c781fae366b6
1,368
py
Python
examples/capture_circular.py
IanTBlack/picamera2
4d31a56cdb0d8360e71927e754fc6bef50bec360
[ "BSD-2-Clause" ]
71
2022-02-15T14:24:34.000Z
2022-03-29T16:36:46.000Z
examples/capture_circular.py
IanTBlack/picamera2
4d31a56cdb0d8360e71927e754fc6bef50bec360
[ "BSD-2-Clause" ]
37
2022-02-16T12:35:45.000Z
2022-03-31T13:18:42.000Z
examples/capture_circular.py
IanTBlack/picamera2
4d31a56cdb0d8360e71927e754fc6bef50bec360
[ "BSD-2-Clause" ]
15
2022-02-16T12:12:57.000Z
2022-03-31T15:17:58.000Z
#!/usr/bin/python3

import time

import numpy as np

from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput
from picamera2 import Picamera2

lsize = (320, 240)
picam2 = Picamera2()
video_config = picam2.video_configuration(main={"size": (1280, 720), "format": "RGB888"},
                                          lores={"size": lsize, "format": "YUV420"})
picam2.configure(video_config)
picam2.start_preview()
encoder = H264Encoder(1000000, repeat=True)
encoder.output = CircularOutput()
picam2.encoder = encoder
picam2.start()
picam2.start_encoder()

w, h = lsize
prev = None
encoding = False
ltime = 0

while True:
    cur = picam2.capture_buffer("lores")
    cur = cur[:w * h].reshape(h, w)
    if prev is not None:
        # Measure pixels differences between current and
        # previous frame
        mse = np.square(np.subtract(cur, prev)).mean()
        if mse > 7:
            if not encoding:
                epoch = int(time.time())
                encoder.output.fileoutput = "{}.h264".format(epoch)
                encoder.output.start()
                encoding = True
                print("New Motion", mse)
            ltime = time.time()
        else:
            if encoding and time.time() - ltime > 5.0:
                encoder.output.stop()
                encoding = False
    prev = cur

picam2.stop_encoder()
27.918367
89
0.604532
158
1,368
5.189873
0.481013
0.063415
0.041463
0
0
0
0
0
0
0
0
0.05499
0.282164
1,368
48
90
28.5
0.780041
0.057749
0
0.051282
0
0
0.041991
0
0
0
0
0
0
1
0
false
0
0.128205
0
0.128205
0.025641
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a561a673ebb04da901d20e99ce9c86e3955a26e
8,933
py
Python
Bio/NeuralNetwork/Gene/Pattern.py
barendt/biopython
391bcdbee7f821bff3e12b75c635a06bc1b2dcea
[ "PostgreSQL" ]
3
2017-10-23T21:53:57.000Z
2019-09-23T05:14:12.000Z
Bio/NeuralNetwork/Gene/Pattern.py
eoc21/biopython
c0f8db8f55a506837c320459957a0ce99b0618b6
[ "PostgreSQL" ]
null
null
null
Bio/NeuralNetwork/Gene/Pattern.py
eoc21/biopython
c0f8db8f55a506837c320459957a0ce99b0618b6
[ "PostgreSQL" ]
6
2020-02-26T16:34:20.000Z
2020-03-04T15:34:00.000Z
"""Generic functionality useful for all gene representations. This module contains classes which can be used for all the different types of patterns available for representing gene information (ie. motifs, signatures and schemas). These are the general classes which should be handle any of the different specific patterns. """ # standard library import random # biopython from Bio import utils from Bio.Seq import Seq, MutableSeq class PatternIO: """Allow reading and writing of patterns to files. This just defines a simple persistance class for patterns, making it easy to write them to a file and read 'em back. """ def __init__(self, alphabet = None): """Intialize the reader and writer class. Arguments: o alphabet - An optional argument specifying the alphabet which patterns should follow. If an alphabet is set it'll be used to verify that all patterns follow it. Attributes: o separator - A character to use in separating items in a signature when it is written to a file and read back. This character should not be in the possible alphabet of the sequences, or there will be trouble. """ self._alphabet = alphabet self.separator = ";" def write(self, pattern_list, output_handle): """Write a list of patterns to the given handle. """ for pattern in pattern_list: # deal with signatures, concatentate them with the separator if (type(pattern) == type([]) or type(pattern) == type(tuple([]))): string_pattern = self.separator.join(pattern) # deal with the normal cases else: string_pattern = pattern output_handle.write("%s\n" % string_pattern) def write_seq(self, seq_pattern_list, output_handle): """Convenience function to write Seq objects to a file. This can take Seqs and MutableSeqs, and write them to a file as strings. """ # convert the seq patterns into just string patterns all_patterns = [] for seq_pattern in seq_pattern_list: if isinstance(seq_pattern, MutableSeq): seq = seq_pattern.toseq() all_patterns.append(seq.data) elif isinstance(seq_pattern, Seq): all_patterns.append(seq_pattern.data) else: raise ValueError("Unexpected pattern type %r" % seq_pattern) self.write(all_patterns, output_handle) def read(self, input_handle): """Read patterns from the specified handle. """ all_patterns = [] while 1: cur_line = input_handle.readline() if not(cur_line): break cur_pattern = cur_line.rstrip() # split up signatures if cur_pattern.find(self.separator) >= 0: cur_pattern = tuple(cur_pattern.split(self.separator)) if self._alphabet is not None: # make single patterns (not signatures) into lists, so we # can check signatures and single patterns the same if type(cur_pattern) != type(tuple([])): test_pattern = [cur_pattern] else: test_pattern = cur_pattern for pattern_item in test_pattern: pattern_seq = Seq(pattern_item, self._alphabet) if not(utils.verify_alphabet(pattern_seq)): raise ValueError("Pattern %s not matching alphabet %s" % (cur_pattern, self._alphabet)) all_patterns.append(cur_pattern) return all_patterns class PatternRepository: """This holds a list of specific patterns found in sequences. This is designed to be a general holder for a set of patterns and should be subclassed for specific implementations (ie. holding Motifs or Signatures. """ def __init__(self, pattern_info): """Initialize a repository with patterns, Arguments: o pattern_info - A representation of all of the patterns found in a *Finder search. This should be a dictionary, where the keys are patterns, and the values are the number of times a pattern is found. 
The patterns are represented interally as a list of two tuples, where the first element is the number of times a pattern occurs, and the second is the pattern itself. This makes it easy to sort the list and return the top N patterns. """ self._pattern_dict = pattern_info # create the list representation self._pattern_list = [] for pattern_name in self._pattern_dict.keys(): self._pattern_list.append((self._pattern_dict[pattern_name], pattern_name)) self._pattern_list.sort() self._pattern_list.reverse() def get_all(self): """Retrieve all of the patterns in the repository. """ patterns = [] for pattern_info in self._pattern_list: patterns.append(pattern_info[1]) return patterns def get_random(self, num_patterns): """Retrieve the specified number of patterns randomly. Randomly selects patterns from the list and returns them. Arguments: o num_patterns - The total number of patterns to return. """ all_patterns = [] while len(all_patterns) < num_patterns: # pick a pattern, and only add it if it is not already present new_pattern_info = random.choice(self._pattern_list) if new_pattern_info[1] not in all_patterns: all_patterns.append(new_pattern_info[1]) return all_patterns def get_top_percentage(self, percent): """Return a percentage of the patterns. This returns the top 'percent' percentage of the patterns in the repository. """ all_patterns = self.get_all() num_to_return = int(len(all_patterns) * percent) return all_patterns[:num_to_return] def get_top(self, num_patterns): """Return the specified number of most frequently occurring patterns Arguments: o num_patterns - The number of patterns to return. """ all_patterns = [] for pattern_info in self._pattern_list[:num_patterns]: all_patterns.append(pattern_info[1]) return all_patterns def get_differing(self, top_num, bottom_num): """Retrieve patterns that are at the extreme ranges. This returns both patterns at the top of the list (ie. the same as returned by get_top) and at the bottom of the list. This is especially useful for patterns that are the differences between two sets of patterns. Arguments: o top_num - The number of patterns to take from the top of the list. o bottom_num - The number of patterns to take from the bottom of the list. """ all_patterns = [] # first get from the top of the list for pattern_info in self._pattern_list[:top_num]: all_patterns.append(pattern_info[1]) # then from the bottom for pattern_info in self._pattern_list[-bottom_num:]: all_patterns.append(pattern_info[1]) return all_patterns def remove_polyA(self, at_percentage = .9): """Remove patterns which are likely due to polyA tails from the lists. This is just a helper function to remove pattenrs which are likely just due to polyA tails, and thus are not really great motifs. This will also get rid of stuff like ATATAT, which might be a useful motif, so use at your own discretion. XXX Could we write a more general function, based on info content or something like that? Arguments: o at_percentage - The percentage of A and T residues in a pattern that qualifies it for being removed. """ remove_list = [] # find all of the really AT rich patterns for pattern_info in self._pattern_list: pattern_at = float(pattern_info[1].count('A') + pattern_info[1].count('T')) / len(pattern_info[1]) if pattern_at > at_percentage: remove_list.append(pattern_info) # now remove them from the master list for to_remove in remove_list: self._pattern_list.remove(to_remove) def count(self, pattern): """Return the number of times the specified pattern is found. 
""" try: return self._pattern_dict[pattern] except KeyError: return 0
35.169291
110
0.622187
1,139
8,933
4.733977
0.240562
0.046921
0.033383
0.014837
0.147997
0.120364
0.089392
0.059904
0.031157
0.018175
0
0.002129
0.316355
8,933
253
111
35.3083
0.880793
0.431994
0
0.175258
0
0
0.015038
0
0
0
0
0
0
1
0.123711
false
0
0.030928
0
0.257732
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a588636dc362efae84b790a87924f429a4e4039
33,745
py
Python
epsilon/juice.py
twisted/epsilon
783910e1829688e95719a7d3151ec3e2cbb101fd
[ "MIT" ]
4
2017-09-01T18:49:11.000Z
2020-04-21T10:11:33.000Z
epsilon/juice.py
twisted/epsilon
783910e1829688e95719a7d3151ec3e2cbb101fd
[ "MIT" ]
35
2015-01-16T22:12:44.000Z
2021-07-11T11:28:58.000Z
epsilon/juice.py
twisted/epsilon
783910e1829688e95719a7d3151ec3e2cbb101fd
[ "MIT" ]
8
2015-01-24T17:43:58.000Z
2019-09-01T12:38:41.000Z
# -*- test-case-name: epsilon.test.test_juice -*- # Copyright 2005 Divmod, Inc. See LICENSE file for details import warnings, pprint import keyword import io import six from twisted.internet.main import CONNECTION_LOST from twisted.internet.defer import Deferred, maybeDeferred, fail from twisted.internet.protocol import ServerFactory, ClientFactory from twisted.internet.ssl import Certificate from twisted.python.failure import Failure from twisted.python import log, filepath from epsilon.liner import LineReceiver from epsilon.compat import long from epsilon import extime ASK = '_ask' ANSWER = '_answer' COMMAND = '_command' ERROR = '_error' ERROR_CODE = '_error_code' ERROR_DESCRIPTION = '_error_description' LENGTH = '_length' BODY = 'body' debug = False class JuiceBox(dict): """ I am a packet in the JUICE protocol. """ def __init__(self, __body='', **kw): self.update(kw) if __body: assert isinstance(__body, str), "body must be a string: %r" % ( repr(__body),) self['body'] = __body def body(): def get(self): warnings.warn("body attribute of boxes is now just a regular field", stacklevel=2) return self['body'] def set(self, newbody): warnings.warn("body attribute of boxes is now just a regular field", stacklevel=2) self['body'] = newbody return get,set body = property(*body()) def copy(self): newBox = self.__class__() newBox.update(self) return newBox def serialize(self, delimiter=b'\r\n', escaped=b'\r\n '): assert LENGTH not in self delimiter = six.ensure_binary(delimiter) escaped = six.ensure_binary(escaped) L = [] for (k, v) in six.viewitems(self): if k == BODY: k = LENGTH v = str(len(self[BODY])) L.append(six.ensure_binary(k).replace(b'_', b'-').title()) L.append(b': ') L.append(six.ensure_binary(v).replace(delimiter, escaped)) L.append(delimiter) L.append(delimiter) if BODY in self: L.append(six.ensure_binary(self[BODY])) return b''.join(L) def sendTo(self, proto): """ Serialize and send this box to a Juice instance. By the time it is being sent, several keys are required. I must have exactly ONE of:: -ask -answer -error If the '-ask' header is set, then the '-command' header must also be set. 
""" proto.sendPacket(self) # juice.Box => JuiceBox Box = JuiceBox class TLSBox(JuiceBox): def __repr__(self): return 'TLS(**%s)' % (super(TLSBox, self).__repr__(),) def __init__(self, __certificate, __verify=None, __sslstarted=None, **kw): super(TLSBox, self).__init__(**kw) self.certificate = __certificate self.verify = __verify self.sslstarted = __sslstarted def sendTo(self, proto): super(TLSBox, self).sendTo(proto) if self.verify is None: proto.startTLS(self.certificate) else: proto.startTLS(self.certificate, self.verify) if self.sslstarted is not None: self.sslstarted() class QuitBox(JuiceBox): def __repr__(self): return 'Quit(**%s)' % (super(QuitBox, self).__repr__(),) def sendTo(self, proto): super(QuitBox, self).sendTo(proto) proto.transport.loseConnection() class _SwitchBox(JuiceBox): def __repr__(self): return 'Switch(**%s)' % (super(_SwitchBox, self).__repr__(),) def __init__(self, __proto, **kw): super(_SwitchBox, self).__init__(**kw) self.innerProto = __proto def sendTo(self, proto): super(_SwitchBox, self).sendTo(proto) proto._switchTo(self.innerProto) class NegotiateBox(JuiceBox): def __repr__(self): return 'Negotiate(**%s)' % (super(NegotiateBox, self).__repr__(),) def sendTo(self, proto): super(NegotiateBox, self).sendTo(proto) proto._setProtocolVersion(int(self['version'])) class JuiceError(Exception): pass class RemoteJuiceError(JuiceError): """ This error indicates that something went wrong on the remote end of the connection, and the error was serialized and transmitted to you. """ def __init__(self, errorCode, description, fatal=False): """Create a remote error with an error code and description. """ Exception.__init__(self, "Remote[%s]: %s" % (errorCode, description)) self.errorCode = errorCode self.description = description self.fatal = fatal class UnhandledRemoteJuiceError(RemoteJuiceError): def __init__(self, description): errorCode = b"UNHANDLED" RemoteJuiceError.__init__(self, errorCode, description) class JuiceBoxError(JuiceError): pass class MalformedJuiceBox(JuiceBoxError): pass class UnhandledCommand(JuiceError): pass class IncompatibleVersions(JuiceError): pass class _Transactor: def __init__(self, store, callable): self.store = store self.callable = callable def __call__(self, box): return self.store.transact(self.callable, box) def __repr__(self): return '<Transaction in: %s of: %s>' % (self.store, self.callable) class DispatchMixin: baseDispatchPrefix = 'juice_' autoDispatchPrefix = 'command_' wrapper = None def _auto(self, aCallable, proto, namespace=None): if aCallable is None: return None command = aCallable.command if namespace not in command.namespaces: # if you're in the wrong namespace, you are very likely not allowed # to invoke the command you are trying to invoke. some objects # have commands exposed in a separate namespace for security # reasons, since the security model is a role : namespace mapping. 
log.msg('WRONG NAMESPACE: %r, %r' % (namespace, command.namespaces)) return None def doit(box): kw = stringsToObjects(box, command.arguments, proto) for name, extraArg in command.extra: kw[name] = extraArg.fromTransport(proto.transport) # def checkIsDict(result): # if not isinstance(result, dict): # raise RuntimeError("%r returned %r, not dictionary" % ( # aCallable, result)) # return result def checkKnownErrors(error): key = error.trap(*command.allErrors) code = command.allErrors[key] desc = str(error.value) return Failure(RemoteJuiceError( code, desc, error in command.fatalErrors)) return maybeDeferred(aCallable, **kw).addCallback( command.makeResponse, proto).addErrback( checkKnownErrors) return doit def _wrap(self, aCallable): if aCallable is None: return None wrap = self.wrapper if wrap is not None: return wrap(aCallable) else: return aCallable def normalizeCommand(self, cmd): """Return the canonical form of a command. """ return cmd.upper().strip().replace('-', '_') def lookupFunction(self, proto, name, namespace): """Return a callable to invoke when executing the named command. """ # Try to find a method to be invoked in a transaction first # Otherwise fallback to a "regular" method fName = self.autoDispatchPrefix + name fObj = getattr(self, fName, None) if fObj is not None: # pass the namespace along return self._auto(fObj, proto, namespace) assert namespace is None, 'Old-style parsing' # Fall back to simplistic command dispatching - we probably want to get # rid of this eventually, there's no reason to do extra work and write # fewer docs all the time. fName = self.baseDispatchPrefix + name return getattr(self, fName, None) def dispatchCommand(self, proto, cmd, box, namespace=None): fObj = self.lookupFunction(proto, self.normalizeCommand(cmd), namespace) if fObj is None: return fail(UnhandledCommand(cmd)) return maybeDeferred(self._wrap(fObj), box) def normalizeKey(key): lkey = six.ensure_str(key).lower().replace('-', '_') if keyword.iskeyword(lkey): return lkey.title() return lkey def parseJuiceHeaders(lines): """ Create a JuiceBox from a list of header lines. @param lines: a list of lines. @type lines: a list of L{bytes} """ b = JuiceBox() key = None for L in lines: if L[0:1] == b' ': # continuation assert key is not None b[key] += six.ensure_str(b'\r\n' + L[1:]) continue parts = L.split(b': ', 1) if len(parts) != 2: raise MalformedJuiceBox("Wrong number of parts: %r" % (L,)) key, value = parts key = normalizeKey(key) b[key] = six.ensure_str(value) return int(b.pop(LENGTH, 0)), b class JuiceParserBase(DispatchMixin): def __init__(self): self._outstandingRequests = {} def _puke(self, failure): log.msg("Juice server or network failure " "unhandled by client application:") log.err(failure) log.msg( "Dropping connection! 
" "To avoid, add errbacks to ALL remote commands!") if self.transport is not None: self.transport.loseConnection() _counter = long(0) def _nextTag(self): self._counter += 1 return '%x' % (self._counter,) def failAllOutgoing(self, reason): OR = self._outstandingRequests.items() self._outstandingRequests = None # we can never send another request for key, value in OR: value.errback(reason) def juiceBoxReceived(self, box): if debug: log.msg("Juice receive: %s" % pprint.pformat(dict(six.viewitems(box)))) if ANSWER in box: question = self._outstandingRequests.pop(box[ANSWER]) question.addErrback(self._puke) self._wrap(question.callback)(box) elif ERROR in box: question = self._outstandingRequests.pop(box[ERROR]) question.addErrback(self._puke) self._wrap(question.errback)( Failure(RemoteJuiceError(box[ERROR_CODE], box[ERROR_DESCRIPTION]))) elif COMMAND in box: cmd = box[COMMAND] def sendAnswer(answerBox): if ASK not in box: return if self.transport is None: return answerBox[ANSWER] = box[ASK] answerBox.sendTo(self) def sendError(error): if ASK not in box: return error if error.check(RemoteJuiceError): code = error.value.errorCode desc = error.value.description if error.value.fatal: errorBox = QuitBox() else: errorBox = JuiceBox() else: errorBox = QuitBox() log.err(error) # here is where server-side logging happens # if the error isn't handled code = 'UNHANDLED' desc = "Unhandled Remote System Exception " errorBox[ERROR] = box[ASK] errorBox[ERROR_DESCRIPTION] = desc errorBox[ERROR_CODE] = code if self.transport is not None: errorBox.sendTo(self) return None # intentionally stop the error here: don't log the # traceback if it's handled, do log it (earlier) if # it isn't self.dispatchCommand(self, cmd, box).addCallbacks(sendAnswer, sendError ).addErrback(self._puke) else: raise RuntimeError( "Empty packet received over connection-oriented juice: %r" % (box,)) def sendBoxCommand(self, command, box, requiresAnswer=True): """ Send a command across the wire with the given C{juice.Box}. Returns a Deferred which fires with the response C{juice.Box} when it is received, or fails with a C{juice.RemoteJuiceError} if an error is received. If the Deferred fails and the error is not handled by the caller of this method, the failure will be logged and the connection dropped. 
""" if self._outstandingRequests is None: return fail(CONNECTION_LOST) box[COMMAND] = command tag = self._nextTag() if requiresAnswer: box[ASK] = tag result = self._outstandingRequests[tag] = Deferred() else: result = None box.sendTo(self) return result class Argument: optional = False def __init__(self, optional=False): self.optional = optional def retrieve(self, d, name): if self.optional: value = d.get(name) if value is not None: del d[name] else: value = d.pop(name) return value def fromBox(self, name, strings, objects, proto): st = self.retrieve(strings, name) if self.optional and st is None: objects[name] = None else: objects[name] = self.fromStringProto(st, proto) def toBox(self, name, strings, objects, proto): obj = self.retrieve(objects, name) if self.optional and obj is None: # strings[name] = None return else: strings[name] = self.toStringProto(obj, proto) def fromStringProto(self, inString, proto): return self.fromString(inString) def toStringProto(self, inObject, proto): return self.toString(inObject) def fromString(self, inString): raise NotImplementedError() def toString(self, inObject): raise NotImplementedError() class JuiceList(Argument): def __init__(self, subargs): self.subargs = subargs def fromStringProto(self, inString, proto): boxes = parseString(six.ensure_binary(inString)) values = [stringsToObjects(box, self.subargs, proto) for box in boxes] return values def toStringProto(self, inObject, proto): return b''.join([ objectsToStrings(objects, self.subargs, Box(), proto).serialize() for objects in inObject ]) class ListOf(Argument): def __init__(self, subarg, delimiter=', '): self.subarg = subarg self.delimiter = delimiter def fromStringProto(self, inString, proto): strings = inString.split(self.delimiter) L = [self.subarg.fromStringProto(string, proto) for string in strings] return L def toStringProto(self, inObject, proto): L = [] for inSingle in inObject: outString = self.subarg.toStringProto(inSingle, proto) assert self.delimiter not in outString L.append(outString) return self.delimiter.join(L) class Integer(Argument): fromString = int def toString(self, inObject): return str(int(inObject)) class String(Argument): def toString(self, inObject): return inObject def fromString(self, inString): return inString class EncodedString(Argument): def __init__(self, encoding): self.encoding = encoding def toString(self, inObject): return inObject.encode(self.encoding) def fromString(self, inString): return inString.decode(self.encoding) # Temporary backwards compatibility for Exponent Body = String class Unicode(String): def toString(self, inObject): # assert isinstance(inObject, unicode) return String.toString(self, inObject.encode('utf-8')) def fromString(self, inString): # assert isinstance(inString, str) return String.fromString(self, inString).decode('utf-8') class Path(Unicode): def fromString(self, inString): return filepath.FilePath(Unicode.fromString(self, inString)) def toString(self, inObject): return Unicode.toString(self, inObject.path) class Float(Argument): fromString = float toString = str class Base64Binary(Argument): def toString(self, inObject): return inObject.encode('base64').replace('\n', '') def fromString(self, inString): return inString.decode('base64') class Time(Argument): def toString(self, inObject): return inObject.asISO8601TimeAndDate() def fromString(self, inString): return extime.Time.fromISO8601TimeAndDate(inString) class ExtraArg: def fromTransport(self, inTransport): raise NotImplementedError() class Peer(ExtraArg): def 
fromTransport(self, inTransport): return inTransport.getQ2QPeer() class PeerDomain(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QPeer().domain class PeerUser(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QPeer().resource class Host(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QHost() class HostDomain(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QHost().domain class HostUser(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QHost().resource class Boolean(Argument): def fromString(self, inString): if inString == 'True': return True elif inString == 'False': return False else: raise RuntimeError("Bad boolean value: %r" % (inString,)) def toString(self, inObject): if inObject: return 'True' else: return 'False' class _CommandMeta(type): def __new__(cls, name, bases, attrs): re = attrs['reverseErrors'] = {} er = attrs['allErrors'] = {} for v, k in six.viewitems(attrs.get('errors',{})): re[k] = v er[v] = k for v, k in six.viewitems(attrs.get('fatalErrors',{})): re[k] = v er[v] = k return type.__new__(cls, name, bases, attrs) @six.add_metaclass(_CommandMeta) class Command: arguments = [] response = [] extra = [] namespaces = [None] # This is set to [None] on purpose: None means # "no namespace", not "empty list". "empty # list" will make your command invalid in _all_ # namespaces, effectively uncallable. errors = {} fatalErrors = {} commandType = Box responseType = Box def commandName(): def get(self): return self.__class__.__name__ raise NotImplementedError("Missing command name") return get, commandName = property(*commandName()) def __init__(self, **kw): self.structured = kw givenArgs = [normalizeKey(k) for k in kw.keys()] forgotten = [] for name, arg in self.arguments: if normalizeKey(name) not in givenArgs and not arg.optional: forgotten.append(normalizeKey(name)) # for v in kw.itervalues(): # if v is None: # from pprint import pformat # raise RuntimeError("ARGH: %s" % pformat(kw)) if forgotten: if len(forgotten) == 1: plural = 'an argument' else: plural = 'some arguments' raise RuntimeError("You forgot %s to %r: %s" % ( plural, self.commandName, ', '.join(forgotten))) forgotten = [] def makeResponse(cls, objects, proto): try: return objectsToStrings(objects, cls.response, cls.responseType(), proto) except: log.msg("Exception in %r.makeResponse" % (cls,)) raise makeResponse = classmethod(makeResponse) def do(self, proto, namespace=None, requiresAnswer=True): if namespace is not None: cmd = namespace + ":" + self.commandName else: cmd = self.commandName def _massageError(error): error.trap(RemoteJuiceError) rje = error.value return Failure(self.reverseErrors.get(rje.errorCode, UnhandledRemoteJuiceError)(rje.description)) d = proto.sendBoxCommand( cmd, objectsToStrings(self.structured, self.arguments, self.commandType(), proto), requiresAnswer) if requiresAnswer: d.addCallback(stringsToObjects, self.response, proto) d.addCallback(self.addExtra, proto.transport) d.addErrback(_massageError) return d def addExtra(self, d, transport): for name, extraArg in self.extra: d[name] = extraArg.fromTransport(transport) return d class ProtocolSwitchCommand(Command): """Use this command to switch from something Juice-derived to a different protocol mid-connection. This can be useful to use juice as the connection-startup negotiation phase. 
Since TLS is a different layer entirely, you can use Juice to negotiate the security parameters of your connection, then switch to a different protocol, and the connection will remain secured. """ def __init__(self, __protoToSwitchToFactory, **kw): self.protoToSwitchToFactory = __protoToSwitchToFactory super(ProtocolSwitchCommand, self).__init__(**kw) def makeResponse(cls, innerProto, proto): return _SwitchBox(innerProto) makeResponse = classmethod(makeResponse) def do(self, proto, namespace=None): d = super(ProtocolSwitchCommand, self).do(proto) proto._lock() def switchNow(ign): innerProto = self.protoToSwitchToFactory.buildProtocol(proto.transport.getPeer()) proto._switchTo(innerProto, self.protoToSwitchToFactory) return ign def die(ign): proto.transport.loseConnection() return ign def handle(ign): self.protoToSwitchToFactory.clientConnectionFailed(None, Failure(CONNECTION_LOST)) return ign return d.addCallbacks(switchNow, handle).addErrback(die) class Negotiate(Command): commandName = 'Negotiate' arguments = [('versions', ListOf(Integer()))] response = [('version', Integer())] responseType = NegotiateBox class Juice(LineReceiver, JuiceParserBase, object): """ JUICE (JUice Is Concurrent Events) is a simple connection-oriented request/response protocol. Packets, or "boxes", are collections of RFC2822-inspired headers, plus a body. Note that this is NOT a literal interpretation of any existing RFC, 822, 2822 or otherwise, but a simpler version that does not do line continuations, does not specify any particular format for header values, dispatches semantic meanings of most headers on the -Command header rather than giving them global meaning, and allows multiple sets of headers (messages, or JuiceBoxes) on a connection. All headers whose names begin with a dash ('-') are reserved for use by the protocol. All others are for application use - their meaning depends on the value of the "-Command" header. """ protocolName = b'juice-base' hostCertificate = None MAX_LENGTH = 1024 * 1024 isServer = property(lambda self: self._issueGreeting, doc=""" True if this is a juice server, e.g. it is going to issue or has issued a server greeting upon connection. """) isClient = property(lambda self: not self._issueGreeting, doc=""" True if this is a juice server, e.g. it is not going to issue or did not issue a server greeting upon connection. """) def __init__(self, issueGreeting): """ @param issueGreeting: whether to issue a greeting when connected. This should be set on server-side Juice protocols. """ JuiceParserBase.__init__(self) self._issueGreeting = issueGreeting def __repr__(self): return '<%s %s/%s at 0x%x>' % (self.__class__.__name__, self.isClient and 'client' or 'server', self.innerProtocol, id(self)) __locked = False def _lock(self): """ Lock this Juice instance so that no further Juice traffic may be sent. This is used when sending a request to switch underlying protocols. You probably want to subclass ProtocolSwitchCommand rather than calling this directly. """ self.__locked = True innerProtocol = None def _switchTo(self, newProto, clientFactory=None): """ Switch this Juice instance to a new protocol. You need to do this 'simultaneously' on both ends of a connection; the easiest way to do this is to use a subclass of ProtocolSwitchCommand. """ assert self.innerProtocol is None, "Protocol can only be safely switched once." 
self.setRawMode() self.innerProtocol = newProto self.innerProtocolClientFactory = clientFactory newProto.makeConnection(self.transport) innerProtocolClientFactory = None def juiceBoxReceived(self, box): if self.__locked and COMMAND in box and ASK in box: # This is a command which will trigger an answer, and we can no # longer answer anything, so don't bother delivering it. return return super(Juice, self).juiceBoxReceived(box) def sendPacket(self, completeBox): """ Send a juice.Box to my peer. Note: transport.write is never called outside of this method. """ assert not self.__locked, "You cannot send juice packets when a connection is locked" if self._startingTLSBuffer is not None: self._startingTLSBuffer.append(completeBox) else: if debug: log.msg("Juice send: %s" % pprint.pformat(dict(six.viewitems(completeBox)))) result = completeBox.serialize() self.transport.write(result) def sendCommand(self, command, __content='', __answer=True, **kw): box = JuiceBox(__content, **kw) return self.sendBoxCommand(command, box, requiresAnswer=__answer) _outstandingRequests = None _justStartedTLS = False def makeConnection(self, transport): self._transportPeer = transport.getPeer() self._transportHost = transport.getHost() log.msg("%s %s connection established (HOST:%s PEER:%s)" % (self.isClient and "client" or "server", self.__class__.__name__, self._transportHost, self._transportPeer)) self._outstandingRequests = {} self._requestBuffer = [] LineReceiver.makeConnection(self, transport) _startingTLSBuffer = None def prepareTLS(self): self._startingTLSBuffer = [] def startTLS(self, certificate, *verifyAuthorities): if self.hostCertificate is None: self.hostCertificate = certificate self._justStartedTLS = True self.transport.startTLS(certificate.options(*verifyAuthorities)) stlsb = self._startingTLSBuffer if stlsb is not None: self._startingTLSBuffer = None for box in stlsb: self.sendPacket(box) else: raise RuntimeError( "Previously authenticated connection between %s and %s " "is trying to re-establish as %s" % ( self.hostCertificate, Certificate.peerFromTransport(self.transport), (certificate, verifyAuthorities))) def dataReceived(self, data): # If we successfully receive any data after TLS has been started, that # means the connection was secured properly. Make a note of that fact. 
if self._justStartedTLS: self._justStartedTLS = False return LineReceiver.dataReceived(self, data) def connectionLost(self, reason): log.msg("%s %s connection lost (HOST:%s PEER:%s)" % ( self.isClient and 'client' or 'server', self.__class__.__name__, self._transportHost, self._transportPeer)) self.failAllOutgoing(reason) if self.innerProtocol is not None: self.innerProtocol.connectionLost(reason) if self.innerProtocolClientFactory is not None: self.innerProtocolClientFactory.clientConnectionLost(None, reason) def lineReceived(self, line): if line: self._requestBuffer.append(line) else: buf = self._requestBuffer self._requestBuffer = [] bodylen, b = parseJuiceHeaders(buf) if bodylen: self._bodyRemaining = bodylen self._bodyBuffer = [] self._pendingBox = b self.setRawMode() else: self.juiceBoxReceived(b) def rawDataReceived(self, data): if self.innerProtocol is not None: self.innerProtocol.dataReceived(data) return self._bodyRemaining -= len(data) if self._bodyRemaining <= 0: if self._bodyRemaining < 0: self._bodyBuffer.append(data[:self._bodyRemaining]) extraData = data[self._bodyRemaining:] else: self._bodyBuffer.append(data) extraData = '' self._pendingBox['body'] = six.ensure_str(b''.join(six.ensure_binary(each) for each in self._bodyBuffer)) self._bodyBuffer = None b, self._pendingBox = self._pendingBox, None self.juiceBoxReceived(b) if self.innerProtocol is not None: self.innerProtocol.makeConnection(self.transport) if extraData: self.innerProtocol.dataReceived(extraData) else: self.setLineMode(extraData) else: self._bodyBuffer.append(data) protocolVersion = 0 def _setProtocolVersion(self, version): # if we ever want to actually mangle encodings, this is the place to do # it! self.protocolVersion = version return version def renegotiateVersion(self, newVersion): assert newVersion in VERSIONS, ( "This side of the connection doesn't support version %r" % (newVersion,)) v = VERSIONS[:] v.remove(newVersion) return Negotiate(versions=[newVersion]).do(self).addCallback( lambda ver: self._setProtocolVersion(ver['version'])) def command_NEGOTIATE(self, versions): for version in versions: if version in VERSIONS: return dict(version=version) raise IncompatibleVersions() command_NEGOTIATE.command = Negotiate VERSIONS = [1] class _ParserHelper(Juice): def __init__(self): Juice.__init__(self, False) self.boxes = [] self.results = Deferred() def getPeer(self): return 'string' def getHost(self): return 'string' disconnecting = False def juiceBoxReceived(self, box): self.boxes.append(box) # Synchronous helpers def parse(cls, fileObj): p = cls() p.makeConnection(p) p.dataReceived(fileObj.read()) return p.boxes parse = classmethod(parse) def parseString(cls, data): with io.BytesIO(data) as f: return cls.parse(f) parseString = classmethod(parseString) parse = _ParserHelper.parse parseString = _ParserHelper.parseString def stringsToObjects(strings, arglist, proto): objects = {} myStrings = strings.copy() for argname, argparser in arglist: argparser.fromBox(argname, myStrings, objects, proto) return objects def objectsToStrings(objects, arglist, strings, proto): myObjects = {} for (k, v) in objects.items(): myObjects[normalizeKey(k)] = v for argname, argparser in arglist: argparser.toBox(argname, strings, myObjects, proto) return strings class JuiceServerFactory(ServerFactory): protocol = Juice def buildProtocol(self, addr): prot = self.protocol(True) prot.factory = self return prot class JuiceClientFactory(ClientFactory): protocol = Juice def buildProtocol(self, addr): prot = self.protocol(False) 
prot.factory = self return prot
33.410891
133
0.602874
3,557
33,745
5.621872
0.187799
0.007601
0.008251
0.010352
0.165325
0.108466
0.088113
0.061259
0.032005
0.020503
0
0.002611
0.30766
33,745
1,009
134
33.444004
0.853315
0.141325
0
0.226124
0
0
0.060525
0
0
0
0
0
0.011236
1
0.164326
false
0.007022
0.018258
0.044944
0.428371
0.004213
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5913eb8964167841ec2eb740f4b32d39ad706a
7,290
py
Python
bonsai3/simulator_client.py
kirillpol-ms/bonsai3-py
ede9c2c1d25d784d61b7cbf1438a257b5d592274
[ "MIT" ]
null
null
null
bonsai3/simulator_client.py
kirillpol-ms/bonsai3-py
ede9c2c1d25d784d61b7cbf1438a257b5d592274
[ "MIT" ]
3
2020-06-01T18:43:55.000Z
2020-08-14T17:44:54.000Z
bonsai3/simulator_client.py
BonsaiAI/bonsai3-py
29158cc58f39604fa96e10e41ff00fc195f6b315
[ "MIT" ]
2
2020-06-16T14:24:17.000Z
2020-08-13T00:27:31.000Z
""" Client for simulator requests """ __copyright__ = "Copyright 2020, Microsoft Corp." # pyright: strict from random import uniform import time from typing import Union import jsons import requests from .exceptions import RetryTimeoutError, ServiceError from .logger import Logger from .simulator_protocol import ( ServiceConfig, SimulatorEvent, SimulatorEventRequest, SimulatorInterface, ) log = Logger() _RETRYABLE_ERROR_CODES = {502, 503, 504} _MAXIMUM_BACKOFF_SECONDS = 60 _BACKOFF_BASE_MULTIPLIER_MILLISECONDS = 50 class SimulatorClient: def __init__(self, config: ServiceConfig): self._config = config self._retry_attempts = 0 self._retry_timeout = None self._session = requests.session() self._session.headers.update( {"Authorization": config.access_key, "Content-type": "application/json"} ) def register_simulator(self, interface: SimulatorInterface) -> SimulatorEvent: return self._http_request(interface, self._config) def get_next_event(self, event_request: SimulatorEventRequest) -> SimulatorEvent: return self._http_request(event_request, self._config) def unregister_simulator(self, session_id: str): url = "{}/v2/workspaces/{}/simulatorSessions/{}".format( self._config.server, self._config.workspace, session_id ) log.debug("Sending unregister request to {}".format(url)) return self._session.delete(url, timeout=self._config.network_timeout_seconds) def _http_request( self, payload: Union[SimulatorInterface, SimulatorEventRequest], config: ServiceConfig, ) -> SimulatorEvent: res = None if self._retry_attempts >= 1: self._handle_retry() try: # NOTE: we assert these for the user here to allow the config object to be partially initialized before use. assert len( config.access_key ), "Environment variable SIM_ACCESS_KEY is unset or access_key is empty." assert len( config.workspace ), "Environment variable SIM_WORKSPACE is unset or workspace is empty." assert len( config.server ), "Environment variable SIM_API_HOST is unset or server is empty." # Register request if isinstance(payload, SimulatorInterface): reg_url = "{}/v2/workspaces/{}/simulatorSessions".format( config.server, config.workspace ) log.debug("Sending registration to {}".format(reg_url)) log.debug("Registration payload: {}".format(jsons.dumps(payload))) res = self._session.post( reg_url, json=jsons.loads(payload.json), headers={ "Authorization": config.access_key, "Content-type": "application/json", }, timeout=self._config.network_timeout_seconds, ) log.debug("Response to registration received.") # Get next event request if isinstance(payload, SimulatorEventRequest): log.network("Sending get next event request.") res = self._session.post( "{}/v2/workspaces/{}/simulatorSessions/{}/advance".format( config.server, config.workspace, payload.sessionId ), json=jsons.loads(jsons.dumps(payload)), headers={ "Authorization": config.access_key, "Content-type": "application/json", }, timeout=self._config.network_timeout_seconds, ) log.network("Response to get next event request received.") except requests.exceptions.Timeout as err: log.error(err) self._retry_attempts += 1 return self._http_request(payload, config) except requests.exceptions.RequestException as err: if res is not None: log.error(res.text) log.error(err) raise if res is not None: if res.status_code in _RETRYABLE_ERROR_CODES: log.debug( "Service returned {}, a retryable response error code." 
" Retrying request.".format(res.status_code) ) self._retry_attempts += 1 return self._http_request(payload, config) # bail on error if res.status_code != 200 and res.status_code != 201: log.error( "Received response with {} http status code. " "Raising exception.".format(res.status_code) ) if res.text: log.error(res.text) raise ServiceError( "Unable to get next event for simulator, " "received {} http status code".format(res.status_code) ) # TODO estee: this needs validation # SimulatorEvent self._retry_attempts = 0 self._retry_timeout = None return self._event_from_json(res.text) raise RuntimeError( "Usage error: Somehow http response ended up as none. " "Check arguments to _http_request and ensure the payload " "is either of type SimulatorInterface or SimulatorEventRequest" ) def _event_from_json(self, json_text: str) -> SimulatorEvent: """Converts a json string into a SimulatorEvent.""" event_dict = jsons.loads(json_text) log.debug("Event Response: {}".format(event_dict)) return SimulatorEvent(event_dict) def _handle_retry(self): log.network("handling retry.") if ( self._retry_timeout and time.time() > self._retry_timeout ) or self._config.retry_timeout_seconds == 0: raise RetryTimeoutError("Simulator Retry time exceeded.") if self._config.retry_timeout_seconds > 0 and self._retry_timeout is None: self._retry_timeout = time.time() + self._config.retry_timeout_seconds log.info( "Simulator will timeout in {} seconds if it is not able " "to connect to the platform.".format(self._retry_timeout - time.time()) ) self._backoff() log.network("retry handled.") def _backoff(self): """ Implements Exponential backoff algorithm with full jitter Check the following url for more information https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ """ power_of_two = 2 ** self._retry_attempts max_sleep = min( power_of_two * _BACKOFF_BASE_MULTIPLIER_MILLISECONDS / 1000.0, _MAXIMUM_BACKOFF_SECONDS, ) sleep = uniform(0, max_sleep) log.debug( "Retry attempt: {}, backing off for {} seconds".format( self._retry_attempts, sleep ) ) time.sleep(sleep)
38.167539
120
0.58834
735
7,290
5.635374
0.273469
0.03042
0.02873
0.02028
0.216079
0.142926
0.105746
0.105746
0.07436
0.07436
0
0.008163
0.327846
7,290
190
121
38.368421
0.837143
0.065706
0
0.190789
0
0
0.18377
0.02162
0
0
0
0.005263
0.019737
1
0.052632
false
0
0.052632
0.013158
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5a90584312df812f9d84d198fd00ed22ebcb67
3,042
py
Python
tweet_evaluator.py
tw-ddis/Gnip-Tweet-Evaluation
c5c847698bd6deb891870e5cf2514dfe78caa1c2
[ "MIT" ]
3
2019-11-14T11:46:27.000Z
2021-01-16T06:04:46.000Z
tweet_evaluator.py
pen-corsica/Gnip-Tweet-Evaluation
c5c847698bd6deb891870e5cf2514dfe78caa1c2
[ "MIT" ]
1
2017-09-19T22:59:03.000Z
2017-09-19T23:06:12.000Z
tweet_evaluator.py
pen-corsica/Gnip-Tweet-Evaluation
c5c847698bd6deb891870e5cf2514dfe78caa1c2
[ "MIT" ]
4
2016-06-13T16:34:32.000Z
2017-08-01T20:20:56.000Z
#!/usr/bin/env python

import argparse
import logging
try:
    import ujson as json
except ImportError:
    import json
import sys
import datetime
import os
import importlib

from gnip_tweet_evaluation import analysis,output

"""
Perform audience and/or conversation analysis on a set of Tweets.
"""

logger = logging.getLogger('analysis')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument("-n","--identifier",dest="unique_identifier", default='0',type=str,
        help="a unique name to identify the conversation/audience; default is '%(default)s'")
    parser.add_argument("-c","--do-conversation-analysis",dest="do_conversation_analysis",action="store_true",default=False,
        help="do conversation analysis on Tweets")
    parser.add_argument("-a","--do-audience-analysis",dest="do_audience_analysis",action="store_true",default=False,
        help="do audience analysis on users")
    parser.add_argument("-i","--input-file-name",dest="input_file_name",default=None,
        help="file containing Tweet data; take input from stdin if not present")
    parser.add_argument('-o','--output-dir',dest='output_directory',default=os.environ['HOME'] + '/tweet_evaluation/',
        help='directory for output files; default is %(default)s')
    parser.add_argument('-b','--baseline-input-file',dest='baseline_input_name',default=None,
        help='Tweets against which to run a relative analysis')
    args = parser.parse_args()

    # get the time right now, to use in output naming
    time_now = datetime.datetime.now()
    output_directory = '{0}/{1:04d}/{2:02d}/{3:02d}/'.format(args.output_directory.rstrip('/')
            ,time_now.year
            ,time_now.month
            ,time_now.day
            )

    # get the empty results object, which defines the measurements to be run
    results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
    baseline_results = None
    if args.baseline_input_name is not None:
        baseline_results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)

    # manage input sources, file opening, and deserialization
    if args.input_file_name is not None:
        tweet_generator = analysis.deserialize_tweets(open(args.input_file_name))
    else:
        tweet_generator = analysis.deserialize_tweets(sys.stdin)

    # run analysis
    analysis.analyze_tweets(tweet_generator, results)

    # run baseline analysis, if requests
    if baseline_results is not None:
        baseline_tweet_generator = analysis.deserialize_tweets(open(args.baseline_input_name))
        analysis.analyze_tweets(baseline_tweet_generator, baseline_results)
        results = analysis.compare_results(results,baseline_results)

    # dump the output
    output.dump_results(results, output_directory, args.unique_identifier)
42.25
140
0.72288
389
3,042
5.449871
0.329049
0.046226
0.048113
0.046698
0.22783
0.209434
0.209434
0.133019
0.09434
0.09434
0
0.004355
0.169625
3,042
71
141
42.84507
0.834917
0.085141
0
0
0
0
0.230115
0.052904
0
0
0
0
0
1
0
false
0
0.204082
0
0.204082
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5b61c287644aa1eac5b1af996dc433d21c0841
2,621
py
Python
app.py
admiral-aokiji/whatsapp-bot
5a0b0d4afddc679cda3670771934cb472629587a
[ "MIT" ]
null
null
null
app.py
admiral-aokiji/whatsapp-bot
5a0b0d4afddc679cda3670771934cb472629587a
[ "MIT" ]
null
null
null
app.py
admiral-aokiji/whatsapp-bot
5a0b0d4afddc679cda3670771934cb472629587a
[ "MIT" ]
null
null
null
from flask import Flask, request import os from twilio.twiml.messaging_response import MessagingResponse from selenium import webdriver chrome_options = webdriver.ChromeOptions() chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN") chrome_options.add_argument("--headless") chrome_options.add_argument("--disable-dev-shm-usage") chrome_options.add_argument("--no-sandbox") driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options) app = Flask(__name__) import utils @app.route("/") def hello(): return "Hello World!" @app.route('/bot', methods=['POST']) def bot(): incoming_msg = request.values.get('Body', '') print(incoming_msg) resp = MessagingResponse() msg = resp.message() responded = False if incoming_msg in ['Hi', 'Hey', 'Menu']: text = f'Hello\n For any suggestions or requests 👇 \n 📞 : 9537701631 \n ✉ : [email protected] \n\n Please enter one of the following options 👇 \n *TPC*. TPC portal willingness \n *B*. __________. ' msg.body(text) responded = True elif 'TPC' in incoming_msg: if incoming_msg == 'TPC': text = 'Menu of options for TPC command' msg.body(text) responded = True utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD')) if incoming_msg == 'TPC -willingness -short' or incoming_msg == 'TPC -w -s': utils.getWillingness() utils.shortenWillingness() elif incoming_msg == 'TPC -willingness -details' or incoming_msg == 'TPC -w -d': utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD')) utils.getWillingness() elif incoming_msg[:15] == 'TPC -experience' or (incoming_msg[:7] == 'TPC - e ' and len(incoming_msg)>8): utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD')) companyName = incoming_msg.split(' ')[2] print(companyName) utils.getInterviewExperience(companyName) else: # send custom error msg for TPC commands pass else: # Checking for formality if responded == False: msg.body('Please enter valid commands') return str(resp) if __name__ == "__main__": app.run(host="localhost", port=5000, debug=True)
39.119403
217
0.645174
328
2,621
4.981707
0.393293
0.100979
0.073439
0.073439
0.241126
0.230722
0.230722
0.230722
0.230722
0.230722
0
0.010816
0.22396
2,621
66
218
39.712121
0.79056
0.023274
0
0.272727
0
0.018182
0.227397
0.020744
0
0
0
0
0
1
0.036364
false
0.018182
0.090909
0.018182
0.163636
0.036364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5cfd1895fbfd5a40ac1b9716a706c236f16372
2,309
py
Python
dynamic_setting/tests/test_models.py
koralarts/django-dynamic-settings
8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318
[ "MIT" ]
2
2015-02-11T05:07:19.000Z
2015-11-24T17:49:03.000Z
dynamic_setting/tests/test_models.py
koralarts/django-dynamic-settings
8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318
[ "MIT" ]
1
2018-03-02T13:26:08.000Z
2018-03-02T13:26:08.000Z
dynamic_setting/tests/test_models.py
koralarts/django-dynamic-settings
8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318
[ "MIT" ]
null
null
null
from django.test import TestCase from dynamic_setting.models import Setting class SettingTestCase(TestCase): def _create_setting(self, name, **kwargs): return Setting.objects.create(name=name, **kwargs) def test_create_setting(self): """ Test Creating a new Setting. """ name = 'TEST_SETTING' data = 'Setting Data' setting = self._create_setting(name, data=data) self.assertEqual(setting.name, name) self.assertEqual(setting.__str__(), name) self.assertEqual(setting.data, data) def test_create_setting_no_data(self): """ Test Creating a new setting without Data. """ name = 'TEST_SETTING' data = '-' setting = self._create_setting(name) self.assertEqual(setting.name, name) self.assertEqual(setting.__str__(), name) self.assertEqual(setting.data, data) def test_delete_setting(self): """ Test Deleting a setting object. """ name = 'TEST_SETTING' setting = self._create_setting(name) setting_pk = setting.pk setting.delete() try: Setting.objects.get(pk=setting_pk) except Setting.DoesNotExist: pass else: self.fail('Setting with ID {} should not exist.'.format(setting_pk)) def test_get_setting(self): """ Test Getting a setting object. """ name = 'TEST_SETTING' data = 'Setting data' setting = self._create_setting(name, data=data) try: setting2 = Setting.objects.get(pk=setting.pk) except Setting.DoesNotExist: self.fail('Setting with ID {} should exist'.format(setting.pk)) self.assertEqual(setting.name, setting2.name) self.assertEqual(setting.__str__(), setting2.__str__()) self.assertEqual(setting.data, setting2.data) self.assertEqual(setting.pk, setting2.pk) def test_update_setting(self): """ Test Updating a setting object. """ name = 'TEST_SETTING' data = 'Setting data' data2 = 'New Setting Data' setting = self._create_setting(name, data=data) setting.data = data2 setting.save() setting2 = Setting.objects.get(pk=setting.pk) self.assertEqual(setting2.data, data2)
36.078125
80
0.628411
262
2,309
5.351145
0.183206
0.094151
0.156919
0.11127
0.584879
0.538516
0.440799
0.381598
0.381598
0.231098
0
0.005872
0.262451
2,309
64
81
36.078125
0.817381
0.073192
0
0.470588
0
0
0.085349
0
0
0
0
0
0.215686
1
0.117647
false
0.019608
0.039216
0.019608
0.196078
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5d7ccdf81701102bd40960b2c34a8fefe0bff7
3,973
py
Python
homeassistant/components/zamg/weather.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/zamg/weather.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
24,710
2016-04-13T08:27:26.000Z
2020-03-02T12:59:13.000Z
homeassistant/components/zamg/weather.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Sensor for data from Austrian Zentralanstalt für Meteorologie.""" from __future__ import annotations import logging import voluptuous as vol from homeassistant.components.weather import ( ATTR_WEATHER_HUMIDITY, ATTR_WEATHER_PRESSURE, ATTR_WEATHER_TEMPERATURE, ATTR_WEATHER_WIND_BEARING, ATTR_WEATHER_WIND_SPEED, PLATFORM_SCHEMA, WeatherEntity, ) from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS from homeassistant.core import HomeAssistant from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType # Reuse data and API logic from the sensor implementation from .sensor import ( ATTRIBUTION, CONF_STATION_ID, ZamgData, closest_station, zamg_stations, ) _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_STATION_ID): cv.string, vol.Inclusive( CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together" ): cv.latitude, vol.Inclusive( CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together" ): cv.longitude, } ) def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the ZAMG weather platform.""" name = config.get(CONF_NAME) latitude = config.get(CONF_LATITUDE, hass.config.latitude) longitude = config.get(CONF_LONGITUDE, hass.config.longitude) station_id = config.get(CONF_STATION_ID) or closest_station( latitude, longitude, hass.config.config_dir ) if station_id not in zamg_stations(hass.config.config_dir): _LOGGER.error( "Configured ZAMG %s (%s) is not a known station", CONF_STATION_ID, station_id, ) return probe = ZamgData(station_id=station_id) try: probe.update() except (ValueError, TypeError) as err: _LOGGER.error("Received error from ZAMG: %s", err) return add_entities([ZamgWeather(probe, name)], True) class ZamgWeather(WeatherEntity): """Representation of a weather condition.""" def __init__(self, zamg_data, stationname=None): """Initialise the platform with a data instance and station name.""" self.zamg_data = zamg_data self.stationname = stationname @property def name(self): """Return the name of the sensor.""" return ( self.stationname or f"ZAMG {self.zamg_data.data.get('Name') or '(unknown station)'}" ) @property def condition(self): """Return the current condition.""" return None @property def attribution(self): """Return the attribution.""" return ATTRIBUTION @property def temperature(self): """Return the platform temperature.""" return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE) @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_CELSIUS @property def pressure(self): """Return the pressure.""" return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE) @property def humidity(self): """Return the humidity.""" return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY) @property def wind_speed(self): """Return the wind speed.""" return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED) @property def wind_bearing(self): """Return the wind bearing.""" return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING) def update(self): """Update current conditions.""" self.zamg_data.update()
28.582734
87
0.678832
454
3,973
5.729075
0.259912
0.042291
0.041522
0.034602
0.110727
0.110727
0.110727
0.110727
0.030757
0
0
0
0.231311
3,973
138
88
28.789855
0.85167
0.131387
0
0.151515
0
0
0.071302
0.009467
0
0
0
0
0
1
0.121212
false
0
0.10101
0
0.343434
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5e2a2e683b7b168a4a8789ce91b511ae5da26d
19,403
py
Python
rasa/model.py
martasls/rasa
6e535a847f6be0c05e7b89208f16a53d2c478629
[ "Apache-2.0" ]
null
null
null
rasa/model.py
martasls/rasa
6e535a847f6be0c05e7b89208f16a53d2c478629
[ "Apache-2.0" ]
null
null
null
rasa/model.py
martasls/rasa
6e535a847f6be0c05e7b89208f16a53d2c478629
[ "Apache-2.0" ]
null
null
null
import copy import glob import hashlib import logging import os import shutil from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404 import tempfile import typing from pathlib import Path from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple from packaging import version from rasa.constants import MINIMUM_COMPATIBLE_VERSION import rasa.shared.utils.io import rasa.utils.io from rasa.cli.utils import create_output_path from rasa.shared.utils.cli import print_success from rasa.shared.constants import ( CONFIG_KEYS_CORE, CONFIG_KEYS_NLU, CONFIG_KEYS, DEFAULT_DOMAIN_PATH, DEFAULT_MODELS_PATH, DEFAULT_CORE_SUBDIRECTORY_NAME, DEFAULT_NLU_SUBDIRECTORY_NAME, ) from rasa.exceptions import ModelNotFound from rasa.utils.common import TempDirectoryPath if typing.TYPE_CHECKING: from rasa.shared.importers.importer import TrainingDataImporter logger = logging.getLogger(__name__) # Type alias for the fingerprint Fingerprint = Dict[Text, Union[Text, List[Text], int, float]] FINGERPRINT_FILE_PATH = "fingerprint.json" FINGERPRINT_CONFIG_KEY = "config" FINGERPRINT_CONFIG_CORE_KEY = "core-config" FINGERPRINT_CONFIG_NLU_KEY = "nlu-config" FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs" FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain" FINGERPRINT_NLG_KEY = "nlg" FINGERPRINT_RASA_VERSION_KEY = "version" FINGERPRINT_STORIES_KEY = "stories" FINGERPRINT_NLU_DATA_KEY = "messages" FINGERPRINT_NLU_LABELS_KEY = "nlu_labels" FINGERPRINT_PROJECT = "project" FINGERPRINT_TRAINED_AT_KEY = "trained_at" class Section(NamedTuple): """Specifies which fingerprint keys decide whether this sub-model is retrained.""" name: Text relevant_keys: List[Text] SECTION_CORE = Section( name="Core model", relevant_keys=[ FINGERPRINT_CONFIG_KEY, FINGERPRINT_CONFIG_CORE_KEY, FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY, FINGERPRINT_STORIES_KEY, FINGERPRINT_RASA_VERSION_KEY, ], ) SECTION_NLU = Section( name="NLU model", relevant_keys=[ FINGERPRINT_CONFIG_KEY, FINGERPRINT_CONFIG_NLU_KEY, FINGERPRINT_NLU_DATA_KEY, FINGERPRINT_RASA_VERSION_KEY, ], ) SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY]) class FingerprintComparisonResult: """Container for the results of a fingerprint comparison.""" def __init__( self, nlu: bool = True, core: bool = True, nlg: bool = True, force_training: bool = False, ): """Creates a `FingerprintComparisonResult` instance. Args: nlu: `True` if the NLU model should be retrained. core: `True` if the Core model should be retrained. nlg: `True` if the responses in the domain should be updated. force_training: `True` if a training of all parts is forced. """ self.nlu = nlu self.core = core self.nlg = nlg self.force_training = force_training def is_training_required(self) -> bool: """Check if anything has to be retrained.""" return any([self.nlg, self.nlu, self.core, self.force_training]) def should_retrain_core(self) -> bool: """Check if the Core model has to be updated.""" return self.force_training or self.core def should_retrain_nlg(self) -> bool: """Check if the responses have to be updated.""" return self.should_retrain_core() or self.nlg def should_retrain_nlu(self) -> bool: """Check if the NLU model has to be updated.""" return self.force_training or self.nlu def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath: """Get a model and unpack it. Raises a `ModelNotFound` exception if no model could be found at the provided path. Args: model_path: Path to the zipped model. 
If it's a directory, the latest trained model is returned. Returns: Path to the unpacked model. """ if not model_path: raise ModelNotFound("No path specified.") elif not os.path.exists(model_path): raise ModelNotFound(f"No file or directory at '{model_path}'.") if os.path.isdir(model_path): model_path = get_latest_model(model_path) if not model_path: raise ModelNotFound( f"Could not find any Rasa model files in '{model_path}'." ) elif not model_path.endswith(".tar.gz"): raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.") try: model_relative_path = os.path.relpath(model_path) except ValueError: model_relative_path = model_path logger.info(f"Loading model {model_relative_path}...") return unpack_model(model_path) def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]: """Get the latest model from a path. Args: model_path: Path to a directory containing zipped models. Returns: Path to latest model in the given directory. """ if not os.path.exists(model_path) or os.path.isfile(model_path): model_path = os.path.dirname(model_path) list_of_files = glob.glob(os.path.join(model_path, "*.tar.gz")) if len(list_of_files) == 0: return None return max(list_of_files, key=os.path.getctime) def unpack_model( model_file: Text, working_directory: Optional[Union[Path, Text]] = None ) -> TempDirectoryPath: """Unpack a zipped Rasa model. Args: model_file: Path to zipped model. working_directory: Location where the model should be unpacked to. If `None` a temporary directory will be created. Returns: Path to unpacked Rasa model. """ import tarfile if working_directory is None: working_directory = tempfile.mkdtemp() # All files are in a subdirectory. try: with tarfile.open(model_file, mode="r:gz") as tar: tar.extractall(working_directory) logger.debug(f"Extracted model to '{working_directory}'.") except Exception as e: logger.error(f"Failed to extract model at {model_file}. Error: {e}") raise return TempDirectoryPath(working_directory) def get_model_subdirectories( unpacked_model_path: Text, ) -> Tuple[Optional[Text], Optional[Text]]: """Return paths for Core and NLU model directories, if they exist. If neither directories exist, a `ModelNotFound` exception is raised. Args: unpacked_model_path: Path to unpacked Rasa model. Returns: Tuple (path to Core subdirectory if it exists or `None` otherwise, path to NLU subdirectory if it exists or `None` otherwise). """ core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME) nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME) if not os.path.isdir(core_path): core_path = None if not os.path.isdir(nlu_path): nlu_path = None if not core_path and not nlu_path: raise ModelNotFound( "No NLU or Core data for unpacked model at: '{}'.".format( unpacked_model_path ) ) return core_path, nlu_path def create_package_rasa( training_directory: Text, output_filename: Text, fingerprint: Optional[Fingerprint] = None, ) -> Text: """Create a zipped Rasa model from trained model files. Args: training_directory: Path to the directory which contains the trained model files. output_filename: Name of the zipped model file to be created. fingerprint: A unique fingerprint to identify the model version. Returns: Path to zipped model. 
""" import tarfile if fingerprint: persist_fingerprint(training_directory, fingerprint) output_directory = os.path.dirname(output_filename) if not os.path.exists(output_directory): os.makedirs(output_directory) with tarfile.open(output_filename, "w:gz") as tar: for elem in os.scandir(training_directory): tar.add(elem.path, arcname=elem.name) shutil.rmtree(training_directory) return output_filename def project_fingerprint() -> Optional[Text]: """Create a hash for the project in the current working directory. Returns: project hash """ try: remote = check_output( # skipcq:BAN-B607,BAN-B603 ["git", "remote", "get-url", "origin"], stderr=DEVNULL ) return hashlib.sha256(remote).hexdigest() except (CalledProcessError, OSError): return None async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint: """Create a model fingerprint from its used configuration and training data. Args: file_importer: File importer which provides the training data and model config. Returns: The fingerprint. """ import time config = await file_importer.get_config() domain = await file_importer.get_domain() stories = await file_importer.get_stories() nlu_data = await file_importer.get_nlu_data() responses = domain.responses # Do a copy of the domain to not change the actual domain (shallow is enough) domain = copy.copy(domain) # don't include the response texts in the fingerprint. # Their fingerprint is separate. domain.responses = {} return { FINGERPRINT_CONFIG_KEY: _get_fingerprint_of_config( config, exclude_keys=CONFIG_KEYS ), FINGERPRINT_CONFIG_CORE_KEY: _get_fingerprint_of_config( config, include_keys=CONFIG_KEYS_CORE ), FINGERPRINT_CONFIG_NLU_KEY: _get_fingerprint_of_config( config, include_keys=CONFIG_KEYS_NLU ), FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY: _get_fingerprint_of_config_without_epochs( config ), FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain.fingerprint(), FINGERPRINT_NLG_KEY: rasa.shared.utils.io.deep_container_fingerprint(responses), FINGERPRINT_PROJECT: project_fingerprint(), FINGERPRINT_NLU_DATA_KEY: nlu_data.fingerprint(), FINGERPRINT_NLU_LABELS_KEY: nlu_data.label_fingerprint(), FINGERPRINT_STORIES_KEY: stories.fingerprint(), FINGERPRINT_TRAINED_AT_KEY: time.time(), FINGERPRINT_RASA_VERSION_KEY: rasa.__version__, } def _get_fingerprint_of_config( config: Optional[Dict[Text, Any]], include_keys: Optional[List[Text]] = None, exclude_keys: Optional[List[Text]] = None, ) -> Text: if not config: return "" keys = include_keys or list(filter(lambda k: k not in exclude_keys, config.keys())) sub_config = {k: config[k] for k in keys if k in config} return rasa.shared.utils.io.deep_container_fingerprint(sub_config) def _get_fingerprint_of_config_without_epochs( config: Optional[Dict[Text, Any]], ) -> Text: if not config: return "" copied_config = copy.deepcopy(config) for key in ["pipeline", "policies"]: if copied_config.get(key): for p in copied_config[key]: if "epochs" in p: del p["epochs"] return rasa.shared.utils.io.deep_container_fingerprint(copied_config) def fingerprint_from_path(model_path: Text) -> Fingerprint: """Load a persisted fingerprint. Args: model_path: Path to directory containing the fingerprint. Returns: The fingerprint or an empty dict if no fingerprint was found. 
""" if not model_path or not os.path.exists(model_path): return {} fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH) if os.path.isfile(fingerprint_path): return rasa.shared.utils.io.read_json_file(fingerprint_path) else: return {} def persist_fingerprint(output_path: Text, fingerprint: Fingerprint): """Persist a model fingerprint. Args: output_path: Directory in which the fingerprint should be saved. fingerprint: The fingerprint to be persisted. """ path = os.path.join(output_path, FINGERPRINT_FILE_PATH) rasa.shared.utils.io.dump_obj_as_json_to_file(path, fingerprint) def did_section_fingerprint_change( fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section ) -> bool: """Check whether the fingerprint of a section has changed.""" for k in section.relevant_keys: if fingerprint1.get(k) != fingerprint2.get(k): logger.info(f"Data ({k}) for {section.name} section changed.") return True return False def move_model(source: Text, target: Text) -> bool: """Move two model directories. Args: source: The original folder which should be merged in another. target: The destination folder where it should be moved to. Returns: `True` if the merge was successful, else `False`. """ try: shutil.move(source, target) return True except Exception as e: logging.debug(f"Could not merge model: {e}") return False def should_retrain( new_fingerprint: Fingerprint, old_model: Text, train_path: Text, has_e2e_examples: bool = False, force_training: bool = False, ) -> FingerprintComparisonResult: """Check which components of a model should be retrained. Args: new_fingerprint: The fingerprint of the new model to be trained. old_model: Path to the old zipped model file. train_path: Path to the directory in which the new model will be trained. has_e2e_examples: Whether the new training data contains e2e examples. force_training: Indicates if the model needs to be retrained even if the data has not changed. Returns: A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa NLU needs to be retrained or not. """ fingerprint_comparison = FingerprintComparisonResult() if old_model is None or not os.path.exists(old_model): return fingerprint_comparison with unpack_model(old_model) as unpacked: last_fingerprint = fingerprint_from_path(unpacked) old_core, old_nlu = get_model_subdirectories(unpacked) fingerprint_comparison = FingerprintComparisonResult( core=did_section_fingerprint_change( last_fingerprint, new_fingerprint, SECTION_CORE ), nlu=did_section_fingerprint_change( last_fingerprint, new_fingerprint, SECTION_NLU ), nlg=did_section_fingerprint_change( last_fingerprint, new_fingerprint, SECTION_NLG ), force_training=force_training, ) # We should retrain core if nlu data changes and there are e2e stories. 
if has_e2e_examples and fingerprint_comparison.should_retrain_nlu(): fingerprint_comparison.core = True core_merge_failed = False if not fingerprint_comparison.should_retrain_core(): target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME) core_merge_failed = not move_model(old_core, target_path) fingerprint_comparison.core = core_merge_failed if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed: # If moving the Core model failed, we should also retrain NLG fingerprint_comparison.nlg = True if not fingerprint_comparison.should_retrain_nlu(): target_path = os.path.join(train_path, "nlu") fingerprint_comparison.nlu = not move_model(old_nlu, target_path) return fingerprint_comparison def can_finetune( last_fingerprint: Fingerprint, new_fingerprint: Fingerprint, core: bool = False, nlu: bool = False, ) -> bool: """Checks if components of a model can be finetuned with incremental training. Args: last_fingerprint: The fingerprint of the old model to potentially be fine-tuned. new_fingerprint: The fingerprint of the new model. core: Check sections for finetuning a core model. nlu: Check sections for finetuning an nlu model. Returns: `True` if the old model can be finetuned, `False` otherwise. """ section_keys = [ FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY, ] if core: section_keys.append(FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY) if nlu: section_keys.append(FINGERPRINT_NLU_LABELS_KEY) fingerprint_changed = did_section_fingerprint_change( last_fingerprint, new_fingerprint, Section(name="finetune", relevant_keys=section_keys), ) old_model_above_min_version = version.parse( last_fingerprint.get(FINGERPRINT_RASA_VERSION_KEY) ) >= version.parse(MINIMUM_COMPATIBLE_VERSION) return old_model_above_min_version and not fingerprint_changed def package_model( fingerprint: Fingerprint, output_directory: Text, train_path: Text, fixed_model_name: Optional[Text] = None, model_prefix: Text = "", ) -> Text: """ Compress a trained model. Args: fingerprint: fingerprint of the model output_directory: path to the directory in which the model should be stored train_path: path to uncompressed model fixed_model_name: name of the compressed model file model_prefix: prefix of the compressed model file Returns: path to 'tar.gz' model file """ output_directory = create_output_path( output_directory, prefix=model_prefix, fixed_name=fixed_model_name ) create_package_rasa(train_path, output_directory, fingerprint) print_success( "Your Rasa model is trained and saved at '{}'.".format( os.path.abspath(output_directory) ) ) return output_directory async def update_model_with_new_domain( importer: "TrainingDataImporter", unpacked_model_path: Union[Path, Text] ) -> None: """Overwrites the domain of an unpacked model with a new domain. Args: importer: Importer which provides the new domain. unpacked_model_path: Path to the unpacked model. """ model_path = Path(unpacked_model_path) / DEFAULT_CORE_SUBDIRECTORY_NAME domain = await importer.get_domain() domain.persist(model_path / DEFAULT_DOMAIN_PATH) def get_model_for_finetuning( previous_model_file: Optional[Union[Path, Text]] ) -> Optional[Text]: """Gets validated path for model to finetune. Args: previous_model_file: Path to model file which should be used for finetuning or a directory in case the latest trained model should be used. Returns: Path to model archive. `None` if there is no model. """ if Path(previous_model_file).is_dir(): logger.debug( f"Trying to load latest model from '{previous_model_file}' for " f"finetuning." 
) return get_latest_model(previous_model_file) if Path(previous_model_file).is_file(): return previous_model_file logger.debug( "No valid model for finetuning found as directory either " "contains no model or model file cannot be found." ) return None
31.345719
89
0.687368
2,448
19,403
5.223448
0.137663
0.026746
0.006256
0.007977
0.232893
0.140846
0.105654
0.063502
0.035348
0.015641
0
0.001487
0.237541
19,403
618
90
31.39644
0.86285
0.240891
0
0.16763
0
0
0.066473
0.006531
0
0
0
0
0
1
0.060694
false
0
0.089595
0
0.251445
0.182081
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5e9ccfe0101a01a8c7498e619dd38d0b22d208
2,484
py
Python
algorithmic_trading/backester_framework_test.py
CatalaniCD/quantitative_finance
c752516a43cd80914dcc8411aadd7b15a258d6a4
[ "MIT" ]
1
2021-08-20T19:17:10.000Z
2021-08-20T19:17:10.000Z
algorithmic_trading/backester_framework_test.py
CatalaniCD/quantitative_finance
c752516a43cd80914dcc8411aadd7b15a258d6a4
[ "MIT" ]
null
null
null
algorithmic_trading/backester_framework_test.py
CatalaniCD/quantitative_finance
c752516a43cd80914dcc8411aadd7b15a258d6a4
[ "MIT" ]
1
2021-10-04T07:44:02.000Z
2021-10-04T07:44:02.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jul 16 11:20:01 2021 @author: q GOAL : develop a backtester from a .py framework / library # installation : pip install backtesting # Documentation Index : - Manuals - Tutorials - Example Strategies - FAQ - License - API Reference Documentation source : https://kernc.github.io/backtesting.py/doc/backtesting/ # Features * Simple, well-documented API * Blazing fast execution * Built-in optimizer * Library of composable base strategies and utilities * Indicator-library-agnostic * Supports any financial instrument with candlestick data * Detailed results * Interactive visualizations """ # ============================================================================= # imports and settings # ============================================================================= # data handling import pandas as pd import numpy as np # import backtesting and set options import backtesting # Set notebook False backtesting.set_bokeh_output(notebook=False) from backtesting import Backtest, Strategy from backtesting.lib import crossover, cross from backtesting.test import SMA, GOOG # ============================================================================= # strategy definition # ============================================================================= class PriceAboveSMA(Strategy): _ma_period = 21 # Moving Average def init(self): # compute momentum """ Simple Moving Average Calc""" self.sma = self.I(SMA, self.data.Close, self._ma_period) def next(self): price = self.data.Close[-1] if not self.position and price > self.sma[-1]: # market entry self.buy() elif self.position and price < self.sma[-1]: # market exit self.position.close() # ============================================================================= # Program Execution # ============================================================================= if __name__ == '__main__': """ Instantiate the Backtester """ backtester = Backtest(GOOG, PriceAboveSMA, commission=.002, exclusive_orders=True, cash = 10000) PLOT = True """ Run a Single Backtest """ stats = backtester.run() print(stats) if PLOT: backtester.plot()
25.090909
79
0.515298
227
2,484
5.572687
0.621145
0.035573
0.020553
0.031621
0.053755
0.053755
0.053755
0.053755
0
0
0
0.014055
0.226651
2,484
98
80
25.346939
0.644456
0.574074
0
0
0
0
0.008307
0
0
0
0
0
0
1
0.083333
false
0
0.25
0
0.416667
0.041667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5f7c637685db9897573cf124a2ab2c3a9ea578
408
py
Python
_sources/5-extra/opg-parameters-sneeuwvlok_solution.py
kooi/ippt-od
f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354
[ "MIT" ]
1
2018-08-21T21:05:41.000Z
2018-08-21T21:05:41.000Z
_sources/5-extra/opg-parameters-sneeuwvlok_solution.py
kooi/ippt-od
f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354
[ "MIT" ]
null
null
null
_sources/5-extra/opg-parameters-sneeuwvlok_solution.py
kooi/ippt-od
f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354
[ "MIT" ]
null
null
null
import turtle tina = turtle.Turtle() tina.shape("turtle") tina.speed(10) def parallellogram(lengte): for i in range(2): tina.forward(lengte) tina.right(60) tina.forward(lengte) tina.right(120) def sneeuwvlok(lengte, num): for i in range(num): parallellogram(lengte) tina.right(360.0/num) # 360.0 forces a cast from int to float sneeuwvlok(30, 6)
21.473684
72
0.644608
59
408
4.457627
0.525424
0.114068
0.171103
0.08365
0.197719
0
0
0
0
0
0
0.060897
0.235294
408
18
73
22.666667
0.782051
0.098039
0
0.133333
0
0
0.016393
0
0
0
0
0
0
1
0.133333
false
0
0.066667
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a5ff44d20ced0eb4ad46edf90219db489f08973
5,153
py
Python
nikola/plugins/task_render_listings.py
servalproject/nikola
4d78504d93597894f3da4a434dfafdec907601a7
[ "MIT" ]
1
2015-12-14T21:38:33.000Z
2015-12-14T21:38:33.000Z
nikola/plugins/task_render_listings.py
servalproject/nikola
4d78504d93597894f3da4a434dfafdec907601a7
[ "MIT" ]
null
null
null
nikola/plugins/task_render_listings.py
servalproject/nikola
4d78504d93597894f3da4a434dfafdec907601a7
[ "MIT" ]
null
null
null
# Copyright (c) 2012 Roberto Alsina y otros. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import unicode_literals, print_function import os from pygments import highlight from pygments.lexers import get_lexer_for_filename, TextLexer from pygments.formatters import HtmlFormatter from nikola.plugin_categories import Task from nikola import utils class Listings(Task): """Render pretty listings.""" name = "render_listings" def gen_tasks(self): """Render pretty code listings.""" kw = { "default_lang": self.site.config["DEFAULT_LANG"], "listings_folder": self.site.config["LISTINGS_FOLDER"], "output_folder": self.site.config["OUTPUT_FOLDER"], "index_file": self.site.config["INDEX_FILE"], } # Things to ignore in listings ignored_extensions = (".pyc",) def render_listing(in_name, out_name, folders=[], files=[]): if in_name: with open(in_name, 'r') as fd: try: lexer = get_lexer_for_filename(in_name) except: lexer = TextLexer() code = highlight(fd.read(), lexer, HtmlFormatter(cssclass='code', linenos="table", nowrap=False, lineanchors=utils.slugify(f), anchorlinenos=True)) title = os.path.basename(in_name) else: code = '' title = '' crumbs = utils.get_crumbs(os.path.relpath(out_name, kw['output_folder']), is_file=True) context = { 'code': code, 'title': title, 'crumbs': crumbs, 'lang': kw['default_lang'], 'folders': folders, 'files': files, 'description': title, } self.site.render_template('listing.tmpl', out_name, context) flag = True template_deps = self.site.template_system.template_deps('listing.tmpl') for root, dirs, files in os.walk(kw['listings_folder']): flag = False # Render all files out_name = os.path.join( kw['output_folder'], root, kw['index_file'] ) yield { 'basename': self.name, 'name': out_name, 'file_dep': template_deps, 'targets': [out_name], 'actions': [(render_listing, [None, out_name, dirs, files])], # This is necessary to reflect changes in blog title, # sidebar links, etc. 'uptodate': [utils.config_changed( self.site.config['GLOBAL_CONTEXT'])], 'clean': True, } for f in files: ext = os.path.splitext(f)[-1] if ext in ignored_extensions: continue in_name = os.path.join(root, f) out_name = os.path.join( kw['output_folder'], root, f) + '.html' yield { 'basename': self.name, 'name': out_name, 'file_dep': template_deps + [in_name], 'targets': [out_name], 'actions': [(render_listing, [in_name, out_name])], # This is necessary to reflect changes in blog title, # sidebar links, etc. 
'uptodate': [utils.config_changed( self.site.config['GLOBAL_CONTEXT'])], 'clean': True, } if flag: yield { 'basename': self.name, 'actions': [], }
39.335878
81
0.523578
530
5,153
4.964151
0.375472
0.029266
0.031927
0.015964
0.196503
0.196503
0.155834
0.155834
0.155834
0.129228
0
0.001582
0.386765
5,153
130
82
39.638462
0.831013
0.253639
0
0.224719
0
0
0.108456
0
0
0
0
0
0
1
0.022472
false
0
0.078652
0
0.123596
0.011236
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a61f1e1f810996e1c76609bf6e7fcc907c4da57
2,020
py
Python
lang/py/aingle/test/gen_interop_data.py
AIngleLab/aae
6e95f89fad60e62bb5305afe97c72f3278d8e04b
[ "Apache-2.0" ]
null
null
null
lang/py/aingle/test/gen_interop_data.py
AIngleLab/aae
6e95f89fad60e62bb5305afe97c72f3278d8e04b
[ "Apache-2.0" ]
null
null
null
lang/py/aingle/test/gen_interop_data.py
AIngleLab/aae
6e95f89fad60e62bb5305afe97c72f3278d8e04b
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 ## # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. import os import sys import aingle.codecs import aingle.datafile import aingle.io import aingle.schema NULL_CODEC = "null" CODECS_TO_VALIDATE = aingle.codecs.KNOWN_CODECS.keys() DATUM = { "intField": 12, "longField": 15234324, "stringField": "hey", "boolField": True, "floatField": 1234.0, "doubleField": -1234.0, "bytesField": b"12312adf", "nullField": None, "arrayField": [5.0, 0.0, 12.0], "mapField": {"a": {"label": "a"}, "bee": {"label": "cee"}}, "unionField": 12.0, "enumField": "C", "fixedField": b"1019181716151413", "recordField": {"label": "blah", "children": [{"label": "inner", "children": []}]}, } def generate(schema_path, output_path): with open(schema_path) as schema_file: interop_schema = aingle.schema.parse(schema_file.read()) for codec in CODECS_TO_VALIDATE: filename = output_path if codec != NULL_CODEC: base, ext = os.path.splitext(output_path) filename = base + "_" + codec + ext with aingle.datafile.DataFileWriter(open(filename, "wb"), aingle.io.DatumWriter(), interop_schema, codec=codec) as dfw: dfw.append(DATUM) if __name__ == "__main__": generate(sys.argv[1], sys.argv[2])
31.5625
127
0.681188
266
2,020
5.078947
0.526316
0.044412
0.019245
0.023686
0
0
0
0
0
0
0
0.034462
0.195545
2,020
63
128
32.063492
0.796923
0.362871
0
0
0
0
0.182177
0
0
0
0
0
0
1
0.027778
false
0
0.166667
0
0.194444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a631c95edefbd6ccab71b999ffa359886535e5b
32,032
py
Python
astropy/units/tests/test_logarithmic.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
3
2018-03-20T15:09:16.000Z
2021-05-27T11:17:33.000Z
astropy/units/tests/test_logarithmic.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
astropy/units/tests/test_logarithmic.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regresssion for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. 
assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert np.all(self.lq.value == lq_fv.value) def test_quantity_view(self): # Cannot view as Quantity, since the unit cannot be represented. with pytest.raises(TypeError): self.lq.view(u.Quantity) # But a dimensionless one is fine. q2 = self.lq2.view(u.Quantity) assert q2.unit is u.mag assert np.all(q2.value == self.lq2.value) lq3 = q2.view(u.Magnitude) assert type(lq3.unit) is u.MagUnit assert lq3.unit.physical_unit == u.dimensionless_unscaled assert np.all(lq3 == self.lq2) class TestLogQuantitySlicing(object): def test_item_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy) assert lq1[9] == u.Magnitude(10.*u.Jy) lq1[2] = 100.*u.Jy assert lq1[2] == u.Magnitude(100.*u.Jy) with pytest.raises(u.UnitsError): lq1[2] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2] = u.Magnitude(100.*u.m) assert lq1[2] == u.Magnitude(100.*u.Jy) def test_slice_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy) lq1[2:4] = 100.*u.Jy assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy)) with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2:4] = u.Magnitude(100.*u.m) assert np.all(lq1[2] == u.Magnitude(100.*u.Jy)) class TestLogQuantityArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other quantities is only possible when the physical unit is dimensionless, and that this turns the result into a normal quantity.""" lq = u.Magnitude(np.arange(1., 11.)*u.Jy) with pytest.raises(u.UnitsError): lq * (1.*u.m) with pytest.raises(u.UnitsError): (1.*u.m) * lq with pytest.raises(u.UnitsError): lq / lq for unit in (u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lq / unit lq2 = u.Magnitude(np.arange(1, 11.)) with pytest.raises(u.UnitsError): lq2 * lq with pytest.raises(u.UnitsError): lq2 / lq with pytest.raises(u.UnitsError): lq / lq2 # but dimensionless_unscaled can be cancelled r = lq2 / u.Magnitude(2.) assert r.unit == u.dimensionless_unscaled assert np.all(r.value == lq2.value/2.) # with dimensionless, normal units OK, but return normal quantities tf = lq2 * u.m tr = u.m * lq2 for t in (tf, tr): assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lq2.unit.physical_unit) t = tf / (50.*u.cm) # now we essentially have the same quantity but with a prefactor of 2 assert t.unit.is_equivalent(lq2.unit.function_unit) assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2) @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogQuantities to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (say, mag**2) is incompatible.""" lq = u.Magnitude(np.arange(1., 4.)*u.Jy) if power == 0: assert np.all(lq ** power == 1.) 
elif power == 1: assert np.all(lq ** power == lq) else: with pytest.raises(u.UnitsError): lq ** power # with dimensionless, it works, but falls back to normal quantity # (except for power=1) lq2 = u.Magnitude(np.arange(10.)) t = lq2**power if power == 0: assert t.unit is u.dimensionless_unscaled assert np.all(t.value == 1.) elif power == 1: assert np.all(t == lq2) else: assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit ** power with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(u.dimensionless_unscaled) def test_error_on_lq_as_power(self): lq = u.Magnitude(np.arange(1., 4.)*u.Jy) with pytest.raises(TypeError): lq ** lq @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) q = 1.23 * other with pytest.raises(u.UnitsError): lq + q with pytest.raises(u.UnitsError): lq - q with pytest.raises(u.UnitsError): q - lq @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_addition_subtraction(self, other): """Check that addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq + other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_sr = other + lq assert_allclose(lq_sr.physical, lq.physical * other_physical) lq_df = lq - other assert_allclose(lq_df.physical, lq.physical / other_physical) lq_dr = other - lq assert_allclose(lq_dr.physical, other_physical / lq.physical) @pytest.mark.parametrize('other', pu_sample) def test_inplace_addition_subtraction_unit_checks(self, other): lu1 = u.mag(u.Jy) lq1 = u.Magnitude(np.arange(1., 10.), lu1) with pytest.raises(u.UnitsError): lq1 += other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 with pytest.raises(u.UnitsError): lq1 -= other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_inplace_addition_subtraction(self, other): """Check that inplace addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq.copy() lq_sf += other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_df = lq.copy() lq_df -= other assert_allclose(lq_df.physical, lq.physical / other_physical) def test_complicated_addition_subtraction(self): """For fun, a more complicated example of addition and subtraction.""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) DMmag = u.mag(dm0) m_st = 10. * u.STmag dm = 5. * DMmag M_st = m_st - dm assert M_st.unit.is_equivalent(u.erg/u.s/u.AA) assert np.abs(M_st.physical / (m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) 
< 1.e-15 class TestLogQuantityComparisons(object): def test_comparison_to_non_quantities_fails(self): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) # On python2, ordering operations always succeed, given essentially # meaningless results. if not six.PY2: with pytest.raises(TypeError): lq > 'a' assert not (lq == 'a') assert lq != 'a' def test_comparison(self): lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy) lq2 = u.Magnitude(2.*u.Jy) assert np.all((lq1 > lq2) == np.array([True, False, False])) assert np.all((lq1 == lq2) == np.array([False, True, False])) lq3 = u.Dex(2.*u.Jy) assert np.all((lq1 > lq3) == np.array([True, False, False])) assert np.all((lq1 == lq3) == np.array([False, True, False])) lq4 = u.Magnitude(2.*u.m) assert not (lq1 == lq4) assert lq1 != lq4 with pytest.raises(u.UnitsError): lq1 < lq4 q5 = 1.5 * u.Jy assert np.all((lq1 > q5) == np.array([True, False, False])) assert np.all((q5 < lq1) == np.array([True, False, False])) with pytest.raises(u.UnitsError): lq1 >= 2.*u.m with pytest.raises(u.UnitsError): lq1 <= lq1.value * u.mag # For physically dimensionless, we can compare with the function unit. lq6 = u.Magnitude(np.arange(1., 4.)) fv6 = lq6.value * u.mag assert np.all(lq6 == fv6) # but not some arbitrary unit, of course. with pytest.raises(u.UnitsError): lq6 < 2.*u.m class TestLogQuantityMethods(object): def setup(self): self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy) self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag() self.mags = (self.mJy, self.m1) @pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace', 'std', 'var', 'ptp', 'diff', 'ediff1d')) def test_always_ok(self, method): for mag in self.mags: res = getattr(mag, method)() assert np.all(res.value == getattr(mag._function_view, method)().value) if method in ('std', 'ptp', 'diff', 'ediff1d'): assert res.unit == u.mag() elif method == 'var': assert res.unit == u.mag**2 else: assert res.unit == mag.unit def test_clip(self): for mag in self.mags: assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value == mag.value.clip(2., 4.)) @pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum')) def test_only_ok_if_dimensionless(self, method): res = getattr(self.m1, method)() assert np.all(res.value == getattr(self.m1._function_view, method)().value) assert res.unit == self.m1.unit with pytest.raises(TypeError): getattr(self.mJy, method)() def test_dot(self): assert np.all(self.m1.dot(self.m1).value == self.m1.value.dot(self.m1.value)) @pytest.mark.parametrize('method', ('prod', 'cumprod')) def test_never_ok(self, method): with pytest.raises(ValueError): getattr(self.mJy, method)() with pytest.raises(ValueError): getattr(self.m1, method)() class TestLogQuantityUfuncs(object): """Spot checks on ufuncs.""" def setup(self): self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy) self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag() self.mags = (self.mJy, self.m1) def test_power(self): assert np.all(np.power(self.mJy, 0.) == 1.) assert np.all(np.power(self.m1, 1.) == self.m1) assert np.all(np.power(self.mJy, 1.) == self.mJy) assert np.all(np.power(self.m1, 2.) == self.m1 ** 2) with pytest.raises(u.UnitsError): np.power(self.mJy, 2.) def test_not_implemented_with_physical_unit(self): with pytest.raises(u.UnitsError): np.square(self.mJy) assert np.all(np.square(self.m1) == self.m1 ** 2)
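The tests above exercise astropy's logarithmic quantities (u.Magnitude / u.Dex). A minimal standalone sketch of the round-trip behaviour they check, assuming a reasonably recent astropy is installed (this is illustrative and not part of the test module):

# Minimal sketch of the Magnitude/Dex round-trip behaviour exercised above,
# assuming astropy is available; not part of the test module itself.
import numpy as np
from astropy import units as u

# A physical flux density converted to a logarithmic (magnitude) quantity.
flux = 100.0 * u.Jy
mag = u.Magnitude(flux)             # -5 mag(Jy), since mag = -2.5 * log10(value)
print(mag)                          # -5.0 mag(Jy)

# The .physical attribute undoes the logarithm.
assert np.isclose(mag.physical.value, 100.0)

# Dex is the plain base-10 logarithmic unit: 100 Jy -> 2 dex(Jy).
dex = u.Dex(flux)
assert np.isclose(dex.value, 2.0)

# Adding magnitudes multiplies the underlying physical quantities,
# mirroring test_addition_subtraction above.
total = mag + u.Magnitude(10.0 * u.Jy)
assert np.isclose(total.physical.to_value(u.Jy * u.Jy), 1000.0)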
37.031214
80
0.59094
4,465
32,032
4.116013
0.095409
0.017847
0.053107
0.039776
0.529111
0.459571
0.389977
0.325117
0.287572
0.262542
0
0.031804
0.279502
32,032
864
81
37.074074
0.764505
0.113543
0
0.286385
0
0
0.021358
0.001139
0
0
0
0
0.314554
1
0.092332
false
0
0.015649
0
0.125196
0.001565
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a648d570a29d5a4d4e0f9f5bffd72aadfab36cb
2,632
py
Python
visual_odometry/visual_odometry.py
vineeths96/Visual-Odometry
88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7
[ "MIT" ]
2
2021-07-20T03:49:54.000Z
2022-01-19T13:43:51.000Z
visual_odometry/visual_odometry.py
vineeths96/Visual-Odometry
88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7
[ "MIT" ]
null
null
null
visual_odometry/visual_odometry.py
vineeths96/Visual-Odometry
88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7
[ "MIT" ]
3
2021-11-28T06:23:23.000Z
2021-12-05T17:09:00.000Z
# Explicit imports added; the original relied on them being re-exported via
# the star import from .parameters.
import cv2
import numpy as np

from .monovideoodometry import MonoVideoOdometry
from .parameters import *


def visual_odometry(
    image_path="./input/sequences/10/image_0/",
    pose_path="./input/poses/10.txt",
    fivepoint=False,
):
    """
    Plots the estimated odometry path using either five point estimation or eight point estimation

    :param image_path: Path to the directory of camera images
    :param pose_path: Path to the directory of pose file
    :param fivepoint: Whether to use five point or eight point method
    :return: None
    """

    vo = MonoVideoOdometry(image_path, pose_path, FOCAL, PP, K, LUCAS_KANADE_PARAMS, fivepoint)
    trajectory = np.zeros(shape=(800, 1200, 3))

    frame_count = 0
    while vo.hasNextFrame():
        frame_count += 1

        frame = vo.current_frame
        cv2.imshow("Frame", frame)

        k = cv2.waitKey(1)
        if k == 27:
            break

        vo.process_frame()

        estimated_coordinates = vo.get_mono_coordinates()
        true_coordinates = vo.get_true_coordinates()

        print("MSE Error: ", np.linalg.norm(estimated_coordinates - true_coordinates))
        print("x: {}, y: {}, z: {}".format(*[str(pt) for pt in estimated_coordinates]))
        print("True_x: {}, True_y: {}, True_z: {}".format(*[str(pt) for pt in true_coordinates]))

        draw_x, draw_y, draw_z = [int(round(x)) for x in estimated_coordinates]
        true_x, true_y, true_z = [int(round(x)) for x in true_coordinates]

        # Ground truth in red, estimate in green, on the trajectory canvas.
        trajectory = cv2.circle(trajectory, (true_x + 400, true_z + 100), 1, list((0, 0, 255)), 4)
        trajectory = cv2.circle(trajectory, (draw_x + 400, draw_z + 100), 1, list((0, 255, 0)), 4)

        cv2.putText(
            trajectory,
            "Actual Position:",
            (140, 90),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (255, 255, 255),
            1,
        )
        cv2.putText(trajectory, "Red", (270, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        cv2.putText(
            trajectory,
            "Estimated Odometry Position:",
            (30, 120),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (255, 255, 255),
            1,
        )
        cv2.putText(
            trajectory,
            "Green",
            (270, 120),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 255, 0),
            1,
        )

        cv2.imshow("trajectory", trajectory)

        if frame_count % 5 == 0:
            cv2.imwrite(f"./results/trajectory/trajectory_{frame_count}.png", trajectory)

    cv2.imwrite(f"./results/trajectory.png", trajectory)
    cv2.destroyAllWindows()
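The driver above delegates the actual pose estimation to MonoVideoOdometry. As a rough, hedged illustration (not the repository's actual implementation; the function name and variables below are made up), the five-point path typically reduces to OpenCV's essential-matrix estimation and pose recovery between tracked feature points:

# Rough sketch of a five-point / eight-point pose update such a class might
# perform internally; names here are illustrative, not the repository's API.
import cv2
import numpy as np

def estimate_relative_pose(prev_pts, curr_pts, focal, pp, use_five_point=True):
    """Return (R, t) between two frames from matched 2D points."""
    if use_five_point:
        E, mask = cv2.findEssentialMat(
            curr_pts, prev_pts, focal=focal, pp=pp,
            method=cv2.RANSAC, prob=0.999, threshold=1.0,
        )
    else:
        # Eight-point route: fundamental matrix first, then E = K^T F K.
        F, mask = cv2.findFundamentalMat(curr_pts, prev_pts, cv2.FM_8POINT)
        K = np.array([[focal, 0, pp[0]], [0, focal, pp[1]], [0, 0, 1.0]])
        E = K.T @ F @ K
    _, R, t, _ = cv2.recoverPose(E, curr_pts, prev_pts, focal=focal, pp=pp)
    return R, t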
32.097561
98
0.578647
324
2,632
4.546296
0.345679
0.050917
0.054311
0.057026
0.278344
0.210455
0.158859
0.071962
0.071962
0.071962
0
0.066522
0.297492
2,632
81
99
32.493827
0.730124
0.108283
0
0.283333
0
0
0.109287
0.04406
0
0
0
0
0
1
0.016667
false
0
0.033333
0
0.05
0.05
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a659a66fbda946ae307b1633f49b480eec28005
886
py
Python
tf-2-data-parallelism/src/utils.py
Amirosimani/amazon-sagemaker-script-mode
ea8d7d6b1b0613dffa793c9ae247cfd8868034ec
[ "Apache-2.0" ]
144
2019-02-05T21:03:30.000Z
2022-03-24T15:24:32.000Z
tf-2-data-parallelism/src/utils.py
kirit93/amazon-sagemaker-script-mode
095af07488889bb2655b741749d8740d3e11a49e
[ "Apache-2.0" ]
22
2019-03-04T04:18:02.000Z
2022-03-09T00:21:36.000Z
tf-2-data-parallelism/src/utils.py
kirit93/amazon-sagemaker-script-mode
095af07488889bb2655b741749d8740d3e11a49e
[ "Apache-2.0" ]
94
2019-02-05T21:03:33.000Z
2022-01-16T07:29:15.000Z
import os

import numpy as np
import tensorflow as tf


def get_train_data(train_dir, batch_size):
    train_images = np.load(os.path.join(train_dir, 'train_images.npy'))
    train_labels = np.load(os.path.join(train_dir, 'train_labels.npy'))
    print('train_images', train_images.shape, 'train_labels', train_labels.shape)

    dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
    dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size)

    return dataset_train


def get_val_data(val_dir):
    test_images = np.load(os.path.join(val_dir, 'validation_images.npy'))
    test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy'))
    print('validation_images', test_images.shape, 'validation_labels', test_labels.shape)

    dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))

    return dataset_test
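A hedged sketch of how these helpers would typically be consumed in a training script; the model architecture, data directories, and step counts below are placeholders, not part of the utility module:

# Illustrative consumer of get_train_data / get_val_data; assumes the file
# above is importable as `utils` and that the .npy files exist at the paths.
import tensorflow as tf

from utils import get_train_data, get_val_data

batch_size = 64
train_ds = get_train_data("/opt/ml/input/data/train", batch_size)
val_ds = get_val_data("/opt/ml/input/data/validation").batch(batch_size)

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# The training dataset repeats indefinitely, so steps_per_epoch must be given.
model.fit(train_ds, steps_per_epoch=1000, epochs=2, validation_data=val_ds)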
36.916667
89
0.76298
134
886
4.731343
0.246269
0.086751
0.050473
0.07571
0.321767
0.321767
0.192429
0.192429
0
0
0
0.006402
0.11851
886
23
90
38.521739
0.805378
0
0
0
0
0
0.148984
0.047404
0
0
0
0
0
1
0.125
false
0
0.1875
0
0.4375
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a65a78ac7de33dc7adca445fb1aae94ba18f829
10,269
py
Python
scripts/run_rbf_comparison_car_air_top5.py
CaptainCandy/influence-release
a152486a1c130fb5f907259c6692b9fe0d2ef6d0
[ "MIT" ]
null
null
null
scripts/run_rbf_comparison_car_air_top5.py
CaptainCandy/influence-release
a152486a1c130fb5f907259c6692b9fe0d2ef6d0
[ "MIT" ]
null
null
null
scripts/run_rbf_comparison_car_air_top5.py
CaptainCandy/influence-release
a152486a1c130fb5f907259c6692b9fe0d2ef6d0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Tue Mar 19 16:26:35 2019 @author: Administrator """ # Forked from run_rbf_comparison.py from __future__ import division from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals import math import copy import numpy as np import pandas as pd import sklearn.linear_model as linear_model import sklearn.preprocessing as preprocessing import scipy import scipy.linalg as slin import scipy.sparse.linalg as sparselin import scipy.sparse as sparse import random import sys sys.path.append("C:/Tang/influence-release-master") #设置自定义包的搜索路径 from load_vehicles import load_vehicles import tensorflow as tf from tensorflow.contrib.learn.python.learn.datasets import base from sklearn.metrics.pairwise import rbf_kernel from influence.inceptionModel import BinaryInceptionModel from influence.smooth_hinge import SmoothHinge from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS import influence.dataset as dataset from influence.dataset import DataSet from influence.dataset_poisoning import generate_inception_features #%% def get_Y_pred_correct_inception(model): Y_test = model.data_sets.test.labels if np.min(Y_test) < -0.5: Y_test = (np.copy(Y_test) + 1) / 2 Y_pred = model.sess.run(model.preds, feed_dict=model.all_test_feed_dict) Y_pred_correct = np.zeros([len(Y_test)]) for idx, label in enumerate(Y_test): Y_pred_correct[idx] = Y_pred[idx, int(label)] return Y_pred_correct num_classes = 2 num_train_ex_per_class = 40 num_test_ex_per_class = 300 dataset_name = 'carair_%s_%s' % (num_train_ex_per_class, num_test_ex_per_class) image_data_sets = load_vehicles( num_train_ex_per_class=num_train_ex_per_class, num_test_ex_per_class=num_test_ex_per_class) weight_decay = 0.001 initial_learning_rate = 0.001 keep_probs = None decay_epochs = [1000, 10000] #%% ### Generate kernelized feature vectors X_train = image_data_sets.train.x X_test = image_data_sets.test.x Y_train = np.copy(image_data_sets.train.labels) * 2 - 1 Y_test = np.copy(image_data_sets.test.labels) * 2 - 1 num_train = X_train.shape[0] num_test = X_test.shape[0] X_stacked = np.vstack((X_train, X_test)) gamma = 0.05 weight_decay = 0.0001 K = rbf_kernel(X_stacked, gamma = gamma / num_train) # ============================================================================= # L = slin.cholesky(K, lower=True) # L_train = L[:num_train, :num_train] # L_test = L[num_train:, :num_train] # ============================================================================= K_train = K[:num_train, :num_train] K_test = K[num_train:, :num_train] ### Compare top 5 influential examples from each network test_idx = 0 ## RBF input_channels = 1 weight_decay = 0.001 batch_size = num_train initial_learning_rate = 0.001 keep_probs = None max_lbfgs_iter = 1000 use_bias = False decay_epochs = [1000, 10000] tf.reset_default_graph() X_train = image_data_sets.train.x Y_train = image_data_sets.train.labels * 2 - 1 train = DataSet(K_train, Y_train) test = DataSet(K_test, Y_test) data_sets = base.Datasets(train=train, validation=None, test=test) input_dim = data_sets.train.x.shape[1] # Train with hinge print('Train rbf with hinge...') rbf_model = SmoothHinge( temp=0, use_bias=use_bias, input_dim=input_dim, weight_decay=weight_decay, num_classes=num_classes, batch_size=batch_size, data_sets=data_sets, initial_learning_rate=initial_learning_rate, keep_probs=keep_probs, decay_epochs=decay_epochs, mini_batch=False, train_dir='output7', log_dir='log', 
model_name='carair_rbf_hinge_t-0') rbf_model.train() hinge_W = rbf_model.sess.run(rbf_model.params)[0] # Then load weights into smoothed version print('Load weights into smoothed version...') tf.reset_default_graph() rbf_model = SmoothHinge( temp=0.001, use_bias=use_bias, input_dim=input_dim, weight_decay=weight_decay, num_classes=num_classes, batch_size=batch_size, data_sets=data_sets, initial_learning_rate=initial_learning_rate, keep_probs=keep_probs, decay_epochs=decay_epochs, mini_batch=False, train_dir='output7', log_dir='log', model_name='car_air_rbf_hinge_t-0.001') params_feed_dict = {} params_feed_dict[rbf_model.W_placeholder] = hinge_W rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict) rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss( [test_idx], np.arange(len(rbf_model.data_sets.train.labels)), force_refresh=True) #%% ## Inception dataset_name = 'carair_40_300' test_idx = 0 # Generate inception features print('Generate inception features...') img_side = 299 num_channels = 3 num_train_ex_per_class = 40 num_test_ex_per_class = 300 batch_size = 20 #TODO: 需要根据配置修改 # reset_default_graph大概就是重置当前线程,让tf session里定义的东西都失效,重来。就是重开一个神经网络session tf.reset_default_graph() full_model_name = '%s_inception' % dataset_name # 下面的语句是定义一个inception双分类器 full_model = BinaryInceptionModel( img_side=img_side, num_channels=num_channels, weight_decay=weight_decay, num_classes=num_classes, batch_size=batch_size, data_sets=image_data_sets, initial_learning_rate=initial_learning_rate, keep_probs=keep_probs, decay_epochs=decay_epochs, mini_batch=True, train_dir='output9', log_dir='log', model_name=full_model_name) # 下面的代码是在使用inception的卷积层生成特征 train_inception_features_val = generate_inception_features( full_model, image_data_sets.train.x, image_data_sets.train.labels, batch_size=batch_size) test_inception_features_val = generate_inception_features( full_model, image_data_sets.test.x, image_data_sets.test.labels, batch_size=batch_size) train = DataSet( train_inception_features_val, image_data_sets.train.labels) test = DataSet( test_inception_features_val, image_data_sets.test.labels) # train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name) # train = DataSet(train_f['inception_features_val'], train_f['labels']) # test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name) # test = DataSet(test_f['inception_features_val'], test_f['labels']) validation = None # 上面的代码是训练了inception模型的全连接层前面的部分,因此输出的feature有2048个维度 data_sets = base.Datasets(train=train, validation=validation, test=test) # train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name) # train = DataSet(train_f['inception_features_val'], train_f['labels']) # test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name) # test = DataSet(test_f['inception_features_val'], test_f['labels']) # validation = None # data_sets = base.Datasets(train=train, validation=validation, test=test) # 下面的代码利用从inception卷积层训练完成后的feature进行一个二分类逻辑回归,取消卷积层后面的FC全连接层 print('Train logistic regression after inception...') input_dim = 2048 weight_decay = 0.001 batch_size = 20 initial_learning_rate = 0.001 keep_probs = None decay_epochs = [1000, 10000] max_lbfgs_iter = 1000 num_classes = 2 tf.reset_default_graph() inception_model = BinaryLogisticRegressionWithLBFGS( input_dim=input_dim, weight_decay=weight_decay, max_lbfgs_iter=max_lbfgs_iter, num_classes=num_classes, batch_size=batch_size, data_sets=data_sets, initial_learning_rate=initial_learning_rate, 
keep_probs=keep_probs, decay_epochs=decay_epochs, mini_batch=False, train_dir='output9', log_dir='log', model_name='%s_inception_onlytop' % dataset_name) inception_model.train() # ============================================================================= # inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss( # [test_idx], # np.arange(len(inception_model.data_sets.train.labels)), # force_refresh=True) # # x_test = X_test[test_idx, :] # y_test = Y_test[test_idx] # # # distances = dataset.find_distances(x_test, X_train) # flipped_idx = Y_train != y_test # rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict) # rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict) # inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model) # # # np.savez( # 'output7/rbf_carair_results_%s' % test_idx, # test_idx=test_idx, # distances=distances, # flipped_idx=flipped_idx, # rbf_margins_test=rbf_margins_test, # rbf_margins_train=rbf_margins_train, # inception_Y_pred_correct=inception_Y_pred_correct, # rbf_predicted_loss_diffs=rbf_predicted_loss_diffs, # inception_predicted_loss_diffs=inception_predicted_loss_diffs # ) # ============================================================================= #%% print('Save results...') #rand_test = random.sample(range(1, 600),50) #np.savez('output7/rand_test_point', rand_test=rand_test) for test_idx in range(1, 600): rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss( [test_idx], np.arange(len(rbf_model.data_sets.train.labels)), force_refresh=True) inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss( [test_idx], np.arange(len(inception_model.data_sets.train.labels)), force_refresh=True) x_test = X_test[test_idx, :] y_test = Y_test[test_idx] distances = dataset.find_distances(x_test, X_train) flipped_idx = Y_train != y_test rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict) rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict) inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model) np.savez( 'output9/rbf_carair_results_%s' % test_idx, test_idx=test_idx, distances=distances, flipped_idx=flipped_idx, rbf_margins_test=rbf_margins_test, rbf_margins_train=rbf_margins_train, inception_Y_pred_correct=inception_Y_pred_correct, rbf_predicted_loss_diffs=rbf_predicted_loss_diffs, inception_predicted_loss_diffs=inception_predicted_loss_diffs )
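The core trick in the script above is to replace raw pixels with an RBF kernel feature map (K_train / K_test) and then fit a hinge-family classifier on those kernelised features. A small self-contained sketch of just that kernelisation step, on synthetic data and without the influence-function machinery:

# Self-contained sketch of the RBF kernelisation step used above, on synthetic
# data; a plain linear SVM stands in for the SmoothHinge TensorFlow model.
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.svm import LinearSVC

rng = np.random.RandomState(0)
X_train = rng.randn(80, 32)
X_test = rng.randn(20, 32)
y_train = rng.randint(0, 2, size=80) * 2 - 1   # labels in {-1, +1}

num_train = X_train.shape[0]
gamma = 0.05

# Each sample is represented by its similarity to the training set,
# mirroring K_train / K_test in the script above.
X_stacked = np.vstack((X_train, X_test))
K = rbf_kernel(X_stacked, gamma=gamma / num_train)
K_train = K[:num_train, :num_train]
K_test = K[num_train:, :num_train]

clf = LinearSVC(C=1.0).fit(K_train, y_train)
print(clf.decision_function(K_test)[:5])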
30.930723
101
0.7435
1,474
10,269
4.770014
0.160109
0.034135
0.025885
0.021619
0.615417
0.559238
0.533779
0.513583
0.484142
0.484142
0
0.017785
0.134872
10,269
331
102
31.024169
0.773638
0.268575
0
0.419811
0
0
0.047363
0.011572
0
0
0
0.003021
0
1
0.004717
false
0
0.122642
0
0.132075
0.028302
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a6623a4cf3e4c5b80fbcffbafebb173294bba30
1,478
py
Python
data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
1
2019-12-04T08:23:33.000Z
2019-12-04T08:23:33.000Z
import sys
import pickle

##########################################################
# usage
# pypy find_4g.py xid_train.p ../../data/train
# xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify
# the order of samples in training data
# ../../data/train is the path of original train data
##########################################################

xid_name = sys.argv[1]
data_path = sys.argv[2]

# Pickle files are opened in binary mode so they load under Python 3.
xid = pickle.load(open(xid_name, 'rb'))  # xid_train.p or xid_test.p
newc = pickle.load(open('newc.p', 'rb'))
newc2 = pickle.load(open('cutcmd3g_for_4g.p', 'rb'))

# Initialise every candidate 4-gram (known 3-gram prefix + whitelisted opcode).
cmd4g = {}
for i in newc2:
    for j in newc:
        cmd4g[(i[0], i[1], i[2], j)] = 0
print(newc)

for c, f in enumerate(xid):
    count = {}
    fo = open(data_path + '/' + f + '.asm')
    tot = 0
    a = -1
    b = -1
    d = -1
    e = -1
    for line in fo:
        xx = line.split()
        for x in xx:
            if x in newc:
                # Shift the sliding window of the last four whitelisted opcodes.
                a = b
                b = d
                d = e
                e = x
                if (a, b, d, e) in cmd4g:
                    if (a, b, d, e) not in count:
                        count[(a, b, d, e)] = 0
                    count[(a, b, d, e)] += 1
                    tot += 1
    fo.close()
    if True:  # c%10000==0:
        print(c * 1.0 / len(xid), tot)
    for i in count:
        cmd4g[i] = count[i] + cmd4g[i]
    del count

# Keep only the 4-grams that actually occurred.
cmd4gx = {}
for i in cmd4g:
    if cmd4g[i] > 0:
        cmd4gx[i] = cmd4g[i]
print(len(cmd4gx))
pickle.dump(cmd4gx, open('cmd4g.p', 'wb'))
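The script counts opcode 4-grams by manually shifting four variables over each assembly listing. A compact, standalone illustration of the same counting idea using collections.Counter (the opcode list and whitelist below are made up for the example):

# Compact illustration of whitelisted 4-gram counting with collections.Counter;
# `whitelist` stands in for `newc` and `opcodes` for the tokens of one .asm file.
from collections import Counter

whitelist = {"mov", "push", "pop", "call", "jmp"}
opcodes = ["mov", "push", "mov", "call", "jmp", "mov", "push", "mov", "call"]

# Keep only whitelisted opcodes, then slide a window of length 4 over them.
seq = [op for op in opcodes if op in whitelist]
four_grams = Counter(zip(seq, seq[1:], seq[2:], seq[3:]))

for gram, n in four_grams.most_common(3):
    print(gram, n)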
25.050847
72
0.451962
221
1,478
2.972851
0.307692
0.045662
0.018265
0.024353
0.045662
0
0
0
0
0
0
0.043086
0.324763
1,478
58
73
25.482759
0.61523
0.165765
0
0
0
0
0.034253
0
0
0
0
0
0
1
0
false
0
0.044444
0
0.044444
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a6655e14286bbfcb799353c5812e25b7720b10d
1,512
py
Python
pygments/lexers/trafficscript.py
blu-base/pygments
da799d14818ed538bf937684a19ce779ddde9446
[ "BSD-2-Clause" ]
1
2015-06-08T14:52:49.000Z
2015-06-08T14:52:49.000Z
pygments/lexers/trafficscript.py
blu-base/pygments
da799d14818ed538bf937684a19ce779ddde9446
[ "BSD-2-Clause" ]
1
2022-03-13T09:17:24.000Z
2022-03-13T09:18:02.000Z
pygments/lexers/trafficscript.py
blu-base/pygments
da799d14818ed538bf937684a19ce779ddde9446
[ "BSD-2-Clause" ]
null
null
null
"""
    pygments.lexers.trafficscript
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexer for RiverBed's TrafficScript (RTS) language.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment

__all__ = ['RtsLexer']


class RtsLexer(RegexLexer):
    """
    For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_

    .. versionadded:: 2.1
    """
    name = 'TrafficScript'
    aliases = ['trafficscript', 'rts']
    filenames = ['*.rts']

    tokens = {
        'root' : [
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
            (r'"', String, 'escapable-string'),
            (r'(0x[0-9a-fA-F]+|\d+)', Number),
            (r'\d+\.\d+', Number.Float),
            (r'\$[a-zA-Z](\w|_)*', Name.Variable),
            (r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
            (r'[a-zA-Z][\w.]*', Name.Function),
            (r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
            (r'(>=|<=|==|!=|'
             r'&&|\|\||'
             r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
             r'>>|<<|'
             r'\+\+|--|=>)', Operator),
            (r'[ \t\r]+', Text),
            (r'#[^\n]*', Comment),
        ],
        'escapable-string' : [
            (r'\\[tsn]', String.Escape),
            (r'[^"]', String),
            (r'"', String, '#pop'),
        ],
    }
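A hedged usage sketch of this lexer through Pygments' normal entry points, assuming it is registered under the 'rts' / 'trafficscript' aliases declared above (the TrafficScript snippet is just sample text for highlighting):

# Hedged usage sketch: highlight a TrafficScript snippet with this lexer.
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_by_name

code = '''
# log the requested host header
$host = http.getHostHeader();
if ($host == "example.com") {
    log.info("request for " . $host);
}
'''

print(highlight(code, get_lexer_by_name("rts"), TerminalFormatter()))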
29.076923
83
0.433201
141
1,512
4.602837
0.539007
0.043143
0.013867
0.043143
0.030817
0.030817
0
0
0
0
0
0.012004
0.28373
1,512
51
84
29.647059
0.587258
0.210317
0
0.064516
0
0.032258
0.310375
0.129904
0
0
0
0
0
1
0
false
0
0.096774
0
0.258065
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a681ede8ff42ae46d972ef7a200eff04f8f87d4
20,333
py
Python
pandas/tests/indexes/test_common.py
dimithras/pandas
d321be6e2a43270625abf671d9e59f16529c4b48
[ "BSD-3-Clause" ]
1
2020-10-29T17:32:26.000Z
2020-10-29T17:32:26.000Z
pandas/tests/indexes/test_common.py
BhavarthShah/pandas
efb068f25b911ff3009d5692eb831df35bb042e5
[ "BSD-3-Clause" ]
null
null
null
pandas/tests/indexes/test_common.py
BhavarthShah/pandas
efb068f25b911ff3009d5692eb831df35bb042e5
[ "BSD-3-Clause" ]
null
null
null
""" Collection of tests asserting things that should be true for any index subclass. Makes use of the `indices` fixture defined in pandas/tests/indexes/conftest.py. """ import re import numpy as np import pytest from pandas._libs.tslibs import iNaT from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd from pandas import ( CategoricalIndex, DatetimeIndex, MultiIndex, PeriodIndex, RangeIndex, TimedeltaIndex, ) import pandas._testing as tm class TestCommon: def test_droplevel(self, index): # GH 21115 if isinstance(index, MultiIndex): # Tested separately in test_multi.py return assert index.droplevel([]).equals(index) for level in index.name, [index.name]: if isinstance(index.name, tuple) and level is index.name: # GH 21121 : droplevel with tuple name continue with pytest.raises(ValueError): index.droplevel(level) for level in "wrong", ["wrong"]: with pytest.raises( KeyError, match=r"'Requested level \(wrong\) does not match index name \(None\)'", ): index.droplevel(level) def test_constructor_non_hashable_name(self, index): # GH 20527 if isinstance(index, MultiIndex): pytest.skip("multiindex handled in test_multi.py") message = "Index.name must be a hashable type" renamed = [["1"]] # With .rename() with pytest.raises(TypeError, match=message): index.rename(name=renamed) # With .set_names() with pytest.raises(TypeError, match=message): index.set_names(names=renamed) def test_constructor_unwraps_index(self, index): if isinstance(index, pd.MultiIndex): raise pytest.skip("MultiIndex has no ._data") a = index b = type(a)(a) tm.assert_equal(a._data, b._data) @pytest.mark.parametrize("itm", [101, "no_int"]) # FutureWarning from non-tuple sequence of nd indexing @pytest.mark.filterwarnings("ignore::FutureWarning") def test_getitem_error(self, index, itm): with pytest.raises(IndexError): index[itm] @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_corner_union(self, index, fname, sname, expected_name): # GH 9943 9862 # Test unions with various name combinations # Do not test MultiIndex or repeats if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # Test copy.union(copy) first = index.copy().set_names(fname) second = index.copy().set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test copy.union(empty) first = index.copy().set_names(fname) second = index.drop(index).set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test empty.union(copy) first = index.drop(index).set_names(fname) second = index.copy().set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test empty.union(empty) first = index.drop(index).set_names(fname) second = index.drop(index).set_names(sname) union = first.union(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(union, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_union_unequal(self, index, fname, sname, expected_name): if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # test copy.union(subset) - need 
sort for unicode and string first = index.copy().set_names(fname) second = index[1:].set_names(sname) union = first.union(second).sort_values() expected = index.set_names(expected_name).sort_values() tm.assert_index_equal(union, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_corner_intersect(self, index, fname, sname, expected_name): # GH35847 # Test intersections with various name combinations if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # Test copy.intersection(copy) first = index.copy().set_names(fname) second = index.copy().set_names(sname) intersect = first.intersection(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test copy.intersection(empty) first = index.copy().set_names(fname) second = index.drop(index).set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test empty.intersection(copy) first = index.drop(index).set_names(fname) second = index.copy().set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test empty.intersection(empty) first = index.drop(index).set_names(fname) second = index.drop(index).set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_intersect_unequal(self, index, fname, sname, expected_name): if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # test copy.intersection(subset) - need sort for unicode and string first = index.copy().set_names(fname) second = index[1:].set_names(sname) intersect = first.intersection(second).sort_values() expected = index[1:].set_names(expected_name).sort_values() tm.assert_index_equal(intersect, expected) def test_to_flat_index(self, index): # 22866 if isinstance(index, MultiIndex): pytest.skip("Separate expectation for MultiIndex") result = index.to_flat_index() tm.assert_index_equal(result, index) def test_set_name_methods(self, index): new_name = "This is the new name for this index" # don't tests a MultiIndex here (as its tested separated) if isinstance(index, MultiIndex): pytest.skip("Skip check for MultiIndex") original_name = index.name new_ind = index.set_names([new_name]) assert new_ind.name == new_name assert index.name == original_name res = index.rename(new_name, inplace=True) # should return None assert res is None assert index.name == new_name assert index.names == [new_name] # FIXME: dont leave commented-out # with pytest.raises(TypeError, match="list-like"): # # should still fail even if it would be the right length # ind.set_names("a") with pytest.raises(ValueError, match="Level must be None"): index.set_names("a", level=0) # rename in place just leaves tuples and other containers alone name = ("A", "B") index.rename(name, inplace=True) assert index.name == name assert index.names == [name] def test_copy_and_deepcopy(self, index): from copy import copy, deepcopy if isinstance(index, MultiIndex): pytest.skip("Skip check for MultiIndex") for func in 
(copy, deepcopy): idx_copy = func(index) assert idx_copy is not index assert idx_copy.equals(index) new_copy = index.copy(deep=True, name="banana") assert new_copy.name == "banana" def test_unique(self, index): # don't test a MultiIndex here (as its tested separated) # don't test a CategoricalIndex because categories change (GH 18291) if isinstance(index, (MultiIndex, CategoricalIndex)): pytest.skip("Skip check for MultiIndex/CategoricalIndex") # GH 17896 expected = index.drop_duplicates() for level in 0, index.name, None: result = index.unique(level=level) tm.assert_index_equal(result, expected) msg = "Too many levels: Index has only 1 level, not 4" with pytest.raises(IndexError, match=msg): index.unique(level=3) msg = ( fr"Requested level \(wrong\) does not match index name " fr"\({re.escape(index.name.__repr__())}\)" ) with pytest.raises(KeyError, match=msg): index.unique(level="wrong") def test_get_unique_index(self, index): # MultiIndex tested separately if not len(index) or isinstance(index, MultiIndex): pytest.skip("Skip check for empty Index and MultiIndex") idx = index[[0] * 5] idx_unique = index[[0]] # We test against `idx_unique`, so first we make sure it's unique # and doesn't contain nans. assert idx_unique.is_unique is True try: assert idx_unique.hasnans is False except NotImplementedError: pass for dropna in [False, True]: result = idx._get_unique_index(dropna=dropna) tm.assert_index_equal(result, idx_unique) # nans: if not index._can_hold_na: pytest.skip("Skip na-check if index cannot hold na") if is_period_dtype(index.dtype): vals = index[[0] * 5]._data vals[0] = pd.NaT elif needs_i8_conversion(index.dtype): vals = index.asi8[[0] * 5] vals[0] = iNaT else: vals = index.values[[0] * 5] vals[0] = np.nan vals_unique = vals[:2] if index.dtype.kind in ["m", "M"]: # i.e. 
needs_i8_conversion but not period_dtype, as above vals = type(index._data)._simple_new(vals, dtype=index.dtype) vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype) idx_nan = index._shallow_copy(vals) idx_unique_nan = index._shallow_copy(vals_unique) assert idx_unique_nan.is_unique is True assert idx_nan.dtype == index.dtype assert idx_unique_nan.dtype == index.dtype for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]): for i in [idx_nan, idx_unique_nan]: result = i._get_unique_index(dropna=dropna) tm.assert_index_equal(result, expected) def test_mutability(self, index): if not len(index): pytest.skip("Skip check for empty Index") msg = "Index does not support mutable operations" with pytest.raises(TypeError, match=msg): index[0] = index[0] def test_view(self, index): assert index.view().name == index.name def test_searchsorted_monotonic(self, index): # GH17271 # not implemented for tuple searches in MultiIndex # or Intervals searches in IntervalIndex if isinstance(index, (MultiIndex, pd.IntervalIndex)): pytest.skip("Skip check for MultiIndex/IntervalIndex") # nothing to test if the index is empty if index.empty: pytest.skip("Skip check for empty Index") value = index[0] # determine the expected results (handle dupes for 'right') expected_left, expected_right = 0, (index == value).argmin() if expected_right == 0: # all values are the same, expected_right should be length expected_right = len(index) # test _searchsorted_monotonic in all cases # test searchsorted only for increasing if index.is_monotonic_increasing: ssm_left = index._searchsorted_monotonic(value, side="left") assert expected_left == ssm_left ssm_right = index._searchsorted_monotonic(value, side="right") assert expected_right == ssm_right ss_left = index.searchsorted(value, side="left") assert expected_left == ss_left ss_right = index.searchsorted(value, side="right") assert expected_right == ss_right elif index.is_monotonic_decreasing: ssm_left = index._searchsorted_monotonic(value, side="left") assert expected_left == ssm_left ssm_right = index._searchsorted_monotonic(value, side="right") assert expected_right == ssm_right else: # non-monotonic should raise. 
with pytest.raises(ValueError): index._searchsorted_monotonic(value, side="left") def test_pickle(self, index): original_name, index.name = index.name, "foo" unpickled = tm.round_trip_pickle(index) assert index.equals(unpickled) index.name = original_name def test_drop_duplicates(self, index, keep): if isinstance(index, MultiIndex): pytest.skip("MultiIndex is tested separately") if isinstance(index, RangeIndex): pytest.skip( "RangeIndex is tested in test_drop_duplicates_no_duplicates " "as it cannot hold duplicates" ) if len(index) == 0: pytest.skip( "empty index is tested in test_drop_duplicates_no_duplicates " "as it cannot hold duplicates" ) # make unique index holder = type(index) unique_values = list(set(index)) unique_idx = holder(unique_values) # make duplicated index n = len(unique_idx) duplicated_selection = np.random.choice(n, int(n * 1.5)) idx = holder(unique_idx.values[duplicated_selection]) # Series.duplicated is tested separately expected_duplicated = ( pd.Series(duplicated_selection).duplicated(keep=keep).values ) tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated) # Series.drop_duplicates is tested separately expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep)) tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped) def test_drop_duplicates_no_duplicates(self, index): if isinstance(index, MultiIndex): pytest.skip("MultiIndex is tested separately") # make unique index if isinstance(index, RangeIndex): # RangeIndex cannot have duplicates unique_idx = index else: holder = type(index) unique_values = list(set(index)) unique_idx = holder(unique_values) # check on unique index expected_duplicated = np.array([False] * len(unique_idx), dtype="bool") tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated) result_dropped = unique_idx.drop_duplicates() tm.assert_index_equal(result_dropped, unique_idx) # validate shallow copy assert result_dropped is not unique_idx def test_drop_duplicates_inplace(self, index): msg = r"drop_duplicates\(\) got an unexpected keyword argument" with pytest.raises(TypeError, match=msg): index.drop_duplicates(inplace=True) def test_has_duplicates(self, index): holder = type(index) if not len(index) or isinstance(index, (MultiIndex, RangeIndex)): # MultiIndex tested separately in: # tests/indexes/multi/test_unique_and_duplicates. # RangeIndex is unique by definition. 
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex") idx = holder([index[0]] * 5) assert idx.is_unique is False assert idx.has_duplicates is True @pytest.mark.parametrize( "dtype", ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"], ) def test_astype_preserves_name(self, index, dtype): # https://github.com/pandas-dev/pandas/issues/32013 if isinstance(index, MultiIndex): index.names = ["idx" + str(i) for i in range(index.nlevels)] else: index.name = "idx" try: # Some of these conversions cannot succeed so we use a try / except result = index.astype(dtype) except (ValueError, TypeError, NotImplementedError, SystemError): return if isinstance(index, MultiIndex): assert result.names == index.names else: assert result.name == index.name def test_ravel_deprecation(self, index): # GH#19956 ravel returning ndarray is deprecated with tm.assert_produces_warning(FutureWarning): index.ravel() @pytest.mark.parametrize("na_position", [None, "middle"]) def test_sort_values_invalid_na_position(index_with_missing, na_position): if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) pytest.xfail("sort_values does not support na_position kwarg") elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") if na_position not in ["first", "last"]: with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"): index_with_missing.sort_values(na_position=na_position) @pytest.mark.parametrize("na_position", ["first", "last"]) def test_sort_values_with_missing(index_with_missing, na_position): # GH 35584. Test that sort_values works with missing values, # sort non-missing and place missing according to na_position if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) pytest.xfail("sort_values does not support na_position kwarg") elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") missing_count = np.sum(index_with_missing.isna()) not_na_vals = index_with_missing[index_with_missing.notna()].values sorted_values = np.sort(not_na_vals) if na_position == "first": sorted_values = np.concatenate([[None] * missing_count, sorted_values]) else: sorted_values = np.concatenate([sorted_values, [None] * missing_count]) expected = type(index_with_missing)(sorted_values) result = index_with_missing.sort_values(na_position=na_position) tm.assert_index_equal(result, expected)
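The name-propagation rules that test_corner_union / test_corner_intersect parametrise can be seen directly on small Index objects; a minimal standalone sketch with synthetic data:

# Minimal illustration of the name-propagation and deduplication behaviour
# checked above (standalone, synthetic data).
import pandas as pd

a = pd.Index([1, 2, 3], name="A")
b = pd.Index([2, 3, 4], name="A")
c = pd.Index([2, 3, 4], name="B")

print(a.union(b).name)          # "A"  -- identical names are kept
print(a.union(c).name)          # None -- conflicting names are dropped
print(a.intersection(c).name)   # None

# drop_duplicates() returns a unique index (name kept) and never mutates in place.
idx = pd.Index([1, 1, 2, 3, 3], name="idx")
print(idx.drop_duplicates())    # Index([1, 2, 3], ..., name='idx')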
37.79368
88
0.629764
2,445
20,333
5.074438
0.151738
0.022568
0.028774
0.024664
0.496816
0.430725
0.397921
0.362779
0.34811
0.340695
0
0.008236
0.27148
20,333
537
89
37.86406
0.829339
0.138002
0
0.407507
0
0
0.101209
0.010201
0
0
0
0.001862
0.134048
1
0.067024
false
0.002681
0.024129
0
0.099196
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a6b9fd92e89d1958b00048f55376ec87fde6db2
7,696
py
Python
docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py
ian-r-rose/visualization
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
[ "CC-BY-4.0" ]
11
2017-01-04T18:19:48.000Z
2021-02-21T01:46:33.000Z
docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py
ian-r-rose/visualization
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
[ "CC-BY-4.0" ]
8
2016-09-22T20:49:51.000Z
2019-09-06T23:28:13.000Z
docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py
ian-r-rose/visualization
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
[ "CC-BY-4.0" ]
13
2016-09-22T20:20:06.000Z
2020-07-13T14:48:32.000Z
#!/usr/bin/env python # encoding: utf-8 r""" Riemann solvers for the shallow water equations. The available solvers are: * Roe - Use Roe averages to caluclate the solution to the Riemann problem * HLL - Use a HLL solver * Exact - Use a newton iteration to calculate the exact solution to the Riemann problem .. math:: q_t + f(q)_x = 0 where .. math:: q(x,t) = \left [ \begin{array}{c} h \\ h u \end{array} \right ], the flux function is .. math:: f(q) = \left [ \begin{array}{c} h u \\ hu^2 + 1/2 g h^2 \end{array}\right ]. and :math:`h` is the water column height, :math:`u` the velocity and :math:`g` is the gravitational acceleration. :Authors: Kyle T. Mandli (2009-02-05): Initial version """ # ============================================================================ # Copyright (C) 2009 Kyle T. Mandli <[email protected]> # # Distributed under the terms of the Berkeley Software Distribution (BSD) # license # http://www.opensource.org/licenses/ # ============================================================================ import numpy as np num_eqn = 2 num_waves = 2 def shallow_roe_1D(q_l,q_r,aux_l,aux_r,problem_data): r""" Roe shallow water solver in 1d:: ubar = (sqrt(u_l) + sqrt(u_r)) / (sqrt(h_l) + sqrt(h_r)) cbar = sqrt( 0.5 * g * (h_l + h_r)) W_1 = | 1 | s_1 = ubar - cbar | ubar - cbar | W_2 = | 1 | s_1 = ubar + cbar | ubar + cbar | a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar *problem_data* should contain: - *g* - (float) Gravitational constant - *efix* - (bool) Boolean as to whether a entropy fix should be used, if not present, false is assumed :Version: 1.0 (2009-02-05) """ # Array shapes num_rp = q_l.shape[1] # Output arrays wave = np.empty( (num_eqn, num_waves, num_rp) ) s = np.zeros( (num_waves, num_rp) ) amdq = np.zeros( (num_eqn, num_rp) ) apdq = np.zeros( (num_eqn, num_rp) ) # Compute roe-averaged quantities ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) / (np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) ) cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:])) # Compute Flux structure delta = q_r - q_l a1 = 0.5 * (-delta[1,:] + (ubar + cbar) * delta[0,:]) / cbar a2 = 0.5 * ( delta[1,:] - (ubar - cbar) * delta[0,:]) / cbar # Compute each family of waves wave[0,0,:] = a1 wave[1,0,:] = a1 * (ubar - cbar) s[0,:] = ubar - cbar wave[0,1,:] = a2 wave[1,1,:] = a2 * (ubar + cbar) s[1,:] = ubar + cbar if problem_data['efix']: raise NotImplementedError("Entropy fix has not been implemented.") else: s_index = np.zeros((2,num_rp)) for m in xrange(num_eqn): for mw in xrange(num_waves): s_index[0,:] = s[mw,:] amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:] apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:] return wave, s, amdq, apdq def shallow_hll_1D(q_l,q_r,aux_l,aux_r,problem_data): r""" HLL shallow water solver :: W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2) W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2) Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2) *problem_data* should contain: - *g* - (float) Gravitational constant :Version: 1.0 (2009-02-05) """ # Array shapes num_rp = q_l.shape[1] num_eqn = 2 num_waves = 2 # Output arrays wave = np.empty( (num_eqn, num_waves, num_rp) ) s = np.empty( (num_waves, num_rp) ) amdq = np.zeros( (num_eqn, num_rp) ) apdq = np.zeros( (num_eqn, num_rp) ) # Compute Roe and right and left speeds ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) / (np.sqrt(q_l[0,:]) + 
np.sqrt(q_r[0,:])) ) cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:])) u_r = q_r[1,:] / q_r[0,:] c_r = np.sqrt(problem_data['grav'] * q_r[0,:]) u_l = q_l[1,:] / q_l[0,:] c_l = np.sqrt(problem_data['grav'] * q_l[0,:]) # Compute Einfeldt speeds s_index = np.empty((4,num_rp)) s_index[0,:] = ubar+cbar s_index[1,:] = ubar-cbar s_index[2,:] = u_l + c_l s_index[3,:] = u_l - c_l s[0,:] = np.min(s_index,axis=0) s_index[2,:] = u_r + c_r s_index[3,:] = u_r - c_r s[1,:] = np.max(s_index,axis=0) # Compute middle state q_hat = np.empty((2,num_rp)) q_hat[0,:] = ((q_r[1,:] - q_l[1,:] - s[1,:] * q_r[0,:] + s[0,:] * q_l[0,:]) / (s[0,:] - s[1,:])) q_hat[1,:] = ((q_r[1,:]**2/q_r[0,:] + 0.5 * problem_data['grav'] * q_r[0,:]**2 - (q_l[1,:]**2/q_l[0,:] + 0.5 * problem_data['grav'] * q_l[0,:]**2) - s[1,:] * q_r[1,:] + s[0,:] * q_l[1,:]) / (s[0,:] - s[1,:])) # Compute each family of waves wave[:,0,:] = q_hat - q_l wave[:,1,:] = q_r - q_hat # Compute variations s_index = np.zeros((2,num_rp)) for m in xrange(num_eqn): for mw in xrange(num_waves): s_index[0,:] = s[mw,:] amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:] apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:] return wave, s, amdq, apdq def shallow_fwave_1d(q_l, q_r, aux_l, aux_r, problem_data): r"""Shallow water Riemann solver using fwaves Also includes support for bathymetry but be wary if you think you might have dry states as this has not been tested. *problem_data* should contain: - *grav* - (float) Gravitational constant - *sea_level* - (float) Datum from which the dry-state is calculated. :Version: 1.0 (2014-09-05) """ g = problem_data['grav'] num_rp = q_l.shape[1] num_eqn = 2 num_waves = 2 # Output arrays fwave = np.empty( (num_eqn, num_waves, num_rp) ) s = np.empty( (num_waves, num_rp) ) amdq = np.zeros( (num_eqn, num_rp) ) apdq = np.zeros( (num_eqn, num_rp) ) # Extract state u_l = np.where(q_l[0,:] - problem_data['sea_level'] > 1e-3, q_l[1,:] / q_l[0,:], 0.0) u_r = np.where(q_r[0,:] - problem_data['sea_level'] > 1e-3, q_r[1,:] / q_r[0,:], 0.0) phi_l = q_l[0,:] * u_l**2 + 0.5 * g * q_l[0,:]**2 phi_r = q_r[0,:] * u_r**2 + 0.5 * g * q_r[0,:]**2 # Speeds s[0,:] = u_l - np.sqrt(g * q_l[0,:]) s[1,:] = u_r + np.sqrt(g * q_r[0,:]) delta1 = q_r[1,:] - q_l[1,:] delta2 = phi_r - phi_l + g * 0.5 * (q_r[0,:] + q_l[0,:]) * (aux_r[0,:] - aux_l[0,:]) beta1 = (s[1,:] * delta1 - delta2) / (s[1,:] - s[0,:]) beta2 = (delta2 - s[0,:] * delta1) / (s[1,:] - s[0,:]) fwave[0,0,:] = beta1 fwave[1,0,:] = beta1 * s[0,:] fwave[0,1,:] = beta2 fwave[1,1,:] = beta2 * s[1,:] for m in xrange(num_eqn): for mw in xrange(num_waves): amdq[m,:] += (s[mw,:] < 0.0) * fwave[m,mw,:] apdq[m,:] += (s[mw,:] >= 0.0) * fwave[m,mw,:] return fwave, s, amdq, apdq def shallow_exact_1D(q_l,q_r,aux_l,aux_r,problem_data): r""" Exact shallow water Riemann solver .. warning:: This solver has not been implemented. """ raise NotImplementedError("The exact swe solver has not been implemented.")
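The Roe solver's docstring lists the averaged speeds and wave strengths; a tiny worked example evaluating those expressions at a single interface (synthetic left/right states, g = 1, using the Roe-averaged velocity exactly as the code computes it) may make the vectorised formulas easier to follow:

# Worked single-interface example of the Roe averages used in shallow_roe_1D;
# synthetic states, g = 1; not part of the solver itself.
import numpy as np

g = 1.0
h_l, hu_l = 2.0, 0.0     # left state: depth and momentum
h_r, hu_r = 1.0, 0.0     # right state

u_l, u_r = hu_l / h_l, hu_r / h_r
ubar = (u_l * np.sqrt(h_l) + u_r * np.sqrt(h_r)) / (np.sqrt(h_l) + np.sqrt(h_r))
cbar = np.sqrt(0.5 * g * (h_l + h_r))

delta_h, delta_hu = h_r - h_l, hu_r - hu_l
a1 = 0.5 * (-delta_hu + (ubar + cbar) * delta_h) / cbar
a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h) / cbar

print("speeds:", ubar - cbar, ubar + cbar)   # s_1, s_2
print("wave strengths:", a1, a2)             # both -0.5 for these states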
31.801653
88
0.511954
1,275
7,696
2.900392
0.155294
0.020011
0.013791
0.021092
0.502434
0.44835
0.401839
0.370471
0.316928
0.288264
0
0.047385
0.287032
7,696
241
89
31.93361
0.626572
0.369802
0
0.383178
0
0
0.028945
0
0
0
0
0
0
1
0.037383
false
0
0.009346
0
0.074766
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a6cf8239e9dd6960a26d7ae881835b1d30a1dd5
10,408
py
Python
nuitka/Constants.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-04-13T18:56:02.000Z
2020-04-13T18:56:02.000Z
nuitka/Constants.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-07-11T17:53:56.000Z
2020-07-11T17:53:56.000Z
nuitka/Constants.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
null
null
null
# Copyright 2020, Kay Hayen, mailto:[email protected] # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Module for constants in Nuitka. This contains tools to compare, classify and test constants. """ import math from types import BuiltinFunctionType from nuitka.Builtins import builtin_type_names from nuitka.PythonVersions import python_version from .__past__ import ( # pylint: disable=I0021,redefined-builtin iterItems, long, unicode, xrange, ) from .Builtins import ( builtin_anon_names, builtin_anon_value_list, builtin_exception_values_list, builtin_named_values_list, ) NoneType = type(None) def compareConstants(a, b): # Many many cases to deal with, pylint: disable=too-many-branches,too-many-return-statements # Supposed fast path for comparison. if type(a) is not type(b): return False # Now it's either not the same, or it is a container that contains NaN or it # is a complex or float that is NaN, the other cases can use == at the end. if type(a) is complex: return compareConstants(a.imag, b.imag) and compareConstants(a.real, b.real) if type(a) is float: # Check sign first, -0.0 is not 0.0, or -nan is not nan, it has a # different sign for a start. if math.copysign(1.0, a) != math.copysign(1.0, b): return False if math.isnan(a) and math.isnan(b): return True return a == b if type(a) in (tuple, list): if len(a) != len(b): return False for ea, eb in zip(a, b): if not compareConstants(ea, eb): return False return True if type(a) is dict: if len(a) != len(b): return False for ea1, ea2 in iterItems(a): for eb1, eb2 in iterItems(b): if compareConstants(ea1, eb1) and compareConstants(ea2, eb2): break else: return False return True if type(a) in (frozenset, set): if len(a) != len(b): return False for ea in a: if ea not in b: # Due to NaN values, we need to compare each set element with # all the other set to be really sure. for eb in b: if compareConstants(ea, eb): break else: return False return True if type(a) is xrange: return str(a) == str(b) # The NaN values of float and complex may let this fail, even if the # constants are built in the same way, therefore above checks. return a == b # These built-in type references are kind of constant too. The list should be # complete. constant_builtin_types = ( int, str, float, list, tuple, set, dict, slice, complex, xrange, NoneType, ) if python_version >= 300: constant_builtin_types += (bytes,) else: constant_builtin_types += ( unicode, long, # This has no name in Python, but the natural one in C-API. 
builtin_anon_names["instance"], ) def isConstant(constant): # Too many cases and all return, that is how we do it here, # pylint: disable=too-many-branches,too-many-return-statements constant_type = type(constant) if constant_type is dict: for key, value in iterItems(constant): if not isConstant(key): return False if not isConstant(value): return False return True elif constant_type in (tuple, list): for element_value in constant: if not isConstant(element_value): return False return True elif constant_type is slice: if ( not isConstant(constant.start) or not isConstant(constant.stop) or not isConstant(constant.step) ): return False return True elif constant_type in ( str, unicode, complex, int, long, bool, float, NoneType, range, bytes, set, frozenset, xrange, bytearray, ): return True elif constant in (Ellipsis, NoneType, NotImplemented): return True elif constant in builtin_anon_value_list: return True elif constant_type is type: # Maybe pre-build this as a set for quicker testing. return ( constant.__name__ in builtin_type_names or constant in builtin_exception_values_list ) elif constant_type is BuiltinFunctionType and constant in builtin_named_values_list: # TODO: Some others could also be usable and even interesting, but # then probably should go into other node types, e.g. str.join is # a candidate. return True else: return False def isMutable(constant): """ Is a constant mutable That means a user of a reference to it, can modify it. Strings are a prime example of immutable, dictionaries are mutable. """ # Many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements constant_type = type(constant) if constant_type in ( str, unicode, complex, int, long, bool, float, NoneType, range, bytes, slice, xrange, type, BuiltinFunctionType, ): return False elif constant_type in (dict, list, set, bytearray): return True elif constant_type is tuple: for value in constant: if isMutable(value): return True return False elif constant_type is frozenset: for value in constant: if isMutable(value): return True return False elif constant is Ellipsis: return False elif constant is NotImplemented: return False else: assert False, repr(constant) def isHashable(constant): """ Is a constant hashable That means a user of a reference to it, can use it for dicts and set keys. This is distinct from mutable, there is one types that is not mutable, and still not hashable: slices. 
""" # Many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements constant_type = type(constant) if constant_type in ( str, unicode, complex, int, long, bool, float, NoneType, xrange, bytes, type, BuiltinFunctionType, ): return True elif constant_type in (dict, list, set, slice, bytearray): return False elif constant_type is tuple: for value in constant: if not isHashable(value): return False return True elif constant_type is frozenset: for value in constant: if not isHashable(value): return False return True elif constant is Ellipsis: return True else: assert False, constant_type def getUnhashableConstant(constant): # Too many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements constant_type = type(constant) if constant_type in ( str, unicode, complex, int, long, bool, float, NoneType, xrange, bytes, type, BuiltinFunctionType, ): return None elif constant_type in (dict, list, set): return constant elif constant_type is tuple: for value in constant: res = getUnhashableConstant(value) if res is not None: return res return None elif constant is Ellipsis: return None elif constant in constant_builtin_types: return None elif constant_type is slice: return None else: assert False, constant_type def isIterableConstant(constant): return type(constant) in ( str, unicode, list, tuple, set, frozenset, dict, xrange, bytes, bytearray, ) def getConstantIterationLength(constant): assert isIterableConstant(constant) return len(constant) def isNumberConstant(constant): return type(constant) in (int, long, float, bool) def isIndexConstant(constant): return type(constant) in (int, long, bool) def createConstantDict(keys, values): # Create it proper size immediately. constant_value = dict.fromkeys(keys, None) for key, value in zip(keys, values): constant_value[key] = value return constant_value def getConstantWeight(constant): constant_type = type(constant) if constant_type is dict: result = 0 for key, value in iterItems(constant): result += getConstantWeight(key) result += getConstantWeight(value) return result elif constant_type in (tuple, list, set, frozenset): result = 0 for element_value in constant: result += getConstantWeight(element_value) return result else: return 1 def isCompileTimeConstantValue(value): """ Determine if a value will be usable at compile time. """ # This needs to match code in makeCompileTimeConstantReplacementNode if isConstant(value): return True elif type(value) is type: return True else: return False
25.635468
96
0.600596
1,275
10,408
4.841569
0.218824
0.052487
0.038879
0.035639
0.377288
0.323668
0.28981
0.264377
0.244128
0.192289
0
0.004912
0.334935
10,408
405
97
25.698765
0.886882
0.26249
0
0.656028
0
0
0.001059
0
0
0
0
0.002469
0.014184
1
0.042553
false
0
0.021277
0.010638
0.283688
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a6d56d36f5446ad1de42a20d6e31bc1aa3492a2
13,724
py
Python
functions/predictionLambda/botocore/endpoint.py
chriscoombs/aws-comparing-algorithms-performance-mlops-cdk
6d3888f3ecd667ee76dc473edba37a608786ed2e
[ "Apache-2.0" ]
40
2020-07-11T10:07:51.000Z
2021-12-11T17:09:20.000Z
functions/predictionLambda/botocore/endpoint.py
chriscoombs/aws-comparing-algorithms-performance-mlops-cdk
6d3888f3ecd667ee76dc473edba37a608786ed2e
[ "Apache-2.0" ]
21
2019-11-10T05:38:06.000Z
2022-03-10T15:07:48.000Z
functions/predictionLambda/botocore/endpoint.py
chriscoombs/aws-comparing-algorithms-performance-mlops-cdk
6d3888f3ecd667ee76dc473edba37a608786ed2e
[ "Apache-2.0" ]
37
2020-07-09T23:12:30.000Z
2022-03-16T11:15:58.000Z
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os import logging import time import threading from botocore.vendored import six from botocore.awsrequest import create_request_object from botocore.exceptions import HTTPClientError from botocore.httpsession import URLLib3Session from botocore.utils import is_valid_endpoint_url, get_environ_proxies from botocore.hooks import first_non_none_response from botocore.history import get_global_history_recorder from botocore.response import StreamingBody from botocore import parsers logger = logging.getLogger(__name__) history_recorder = get_global_history_recorder() DEFAULT_TIMEOUT = 60 MAX_POOL_CONNECTIONS = 10 def convert_to_response_dict(http_response, operation_model): """Convert an HTTP response object to a request dict. This converts the requests library's HTTP response object to a dictionary. :type http_response: botocore.vendored.requests.model.Response :param http_response: The HTTP response from an AWS service request. :rtype: dict :return: A response dictionary which will contain the following keys: * headers (dict) * status_code (int) * body (string or file-like object) """ response_dict = { 'headers': http_response.headers, 'status_code': http_response.status_code, 'context': { 'operation_name': operation_model.name, } } if response_dict['status_code'] >= 300: response_dict['body'] = http_response.content elif operation_model.has_event_stream_output: response_dict['body'] = http_response.raw elif operation_model.has_streaming_output: length = response_dict['headers'].get('content-length') response_dict['body'] = StreamingBody(http_response.raw, length) else: response_dict['body'] = http_response.content return response_dict class Endpoint(object): """ Represents an endpoint for a particular service in a specific region. Only an endpoint can make requests. :ivar service: The Service object that describes this endpoints service. :ivar host: The fully qualified endpoint hostname. :ivar session: The session object. 
""" def __init__(self, host, endpoint_prefix, event_emitter, response_parser_factory=None, http_session=None): self._endpoint_prefix = endpoint_prefix self._event_emitter = event_emitter self.host = host self._lock = threading.Lock() if response_parser_factory is None: response_parser_factory = parsers.ResponseParserFactory() self._response_parser_factory = response_parser_factory self.http_session = http_session if self.http_session is None: self.http_session = URLLib3Session() def __repr__(self): return '%s(%s)' % (self._endpoint_prefix, self.host) def make_request(self, operation_model, request_dict): logger.debug("Making request for %s with params: %s", operation_model, request_dict) return self._send_request(request_dict, operation_model) def create_request(self, params, operation_model=None): request = create_request_object(params) if operation_model: request.stream_output = any([ operation_model.has_streaming_output, operation_model.has_event_stream_output ]) service_id = operation_model.service_model.service_id.hyphenize() event_name = 'request-created.{service_id}.{op_name}'.format( service_id=service_id, op_name=operation_model.name) self._event_emitter.emit(event_name, request=request, operation_name=operation_model.name) prepared_request = self.prepare_request(request) return prepared_request def _encode_headers(self, headers): # In place encoding of headers to utf-8 if they are unicode. for key, value in headers.items(): if isinstance(value, six.text_type): headers[key] = value.encode('utf-8') def prepare_request(self, request): self._encode_headers(request.headers) return request.prepare() def _send_request(self, request_dict, operation_model): attempts = 1 request = self.create_request(request_dict, operation_model) context = request_dict['context'] success_response, exception = self._get_response( request, operation_model, context) while self._needs_retry(attempts, operation_model, request_dict, success_response, exception): attempts += 1 # If there is a stream associated with the request, we need # to reset it before attempting to send the request again. # This will ensure that we resend the entire contents of the # body. request.reset_stream() # Create a new request when retried (including a new signature). request = self.create_request( request_dict, operation_model) success_response, exception = self._get_response( request, operation_model, context) if success_response is not None and \ 'ResponseMetadata' in success_response[1]: # We want to share num retries, not num attempts. total_retries = attempts - 1 success_response[1]['ResponseMetadata']['RetryAttempts'] = \ total_retries if exception is not None: raise exception else: return success_response def _get_response(self, request, operation_model, context): # This will return a tuple of (success_response, exception) # and success_response is itself a tuple of # (http_response, parsed_dict). # If an exception occurs then the success_response is None. # If no exception occurs then exception is None. 
success_response, exception = self._do_get_response( request, operation_model) kwargs_to_emit = { 'response_dict': None, 'parsed_response': None, 'context': context, 'exception': exception, } if success_response is not None: http_response, parsed_response = success_response kwargs_to_emit['parsed_response'] = parsed_response kwargs_to_emit['response_dict'] = convert_to_response_dict( http_response, operation_model) service_id = operation_model.service_model.service_id.hyphenize() self._event_emitter.emit( 'response-received.%s.%s' % ( service_id, operation_model.name), **kwargs_to_emit) return success_response, exception def _do_get_response(self, request, operation_model): try: logger.debug("Sending http request: %s", request) history_recorder.record('HTTP_REQUEST', { 'method': request.method, 'headers': request.headers, 'streaming': operation_model.has_streaming_input, 'url': request.url, 'body': request.body }) service_id = operation_model.service_model.service_id.hyphenize() event_name = 'before-send.%s.%s' % (service_id, operation_model.name) responses = self._event_emitter.emit(event_name, request=request) http_response = first_non_none_response(responses) if http_response is None: http_response = self._send(request) except HTTPClientError as e: return (None, e) except Exception as e: logger.debug("Exception received when sending HTTP request.", exc_info=True) return (None, e) # This returns the http_response and the parsed_data. response_dict = convert_to_response_dict(http_response, operation_model) http_response_record_dict = response_dict.copy() http_response_record_dict['streaming'] = \ operation_model.has_streaming_output history_recorder.record('HTTP_RESPONSE', http_response_record_dict) protocol = operation_model.metadata['protocol'] parser = self._response_parser_factory.create_parser(protocol) parsed_response = parser.parse( response_dict, operation_model.output_shape) # Do a second parsing pass to pick up on any modeled error fields # NOTE: Ideally, we would push this down into the parser classes but # they currently have no reference to the operation or service model # The parsers should probably take the operation model instead of # output shape but we can't change that now if http_response.status_code >= 300: self._add_modeled_error_fields( response_dict, parsed_response, operation_model, parser, ) history_recorder.record('PARSED_RESPONSE', parsed_response) return (http_response, parsed_response), None def _add_modeled_error_fields( self, response_dict, parsed_response, operation_model, parser, ): error_code = parsed_response.get("Error", {}).get("Code") if error_code is None: return service_model = operation_model.service_model error_shape = service_model.shape_for_error_code(error_code) if error_shape is None: return modeled_parse = parser.parse(response_dict, error_shape) # TODO: avoid naming conflicts with ResponseMetadata and Error parsed_response.update(modeled_parse) def _needs_retry(self, attempts, operation_model, request_dict, response=None, caught_exception=None): service_id = operation_model.service_model.service_id.hyphenize() event_name = 'needs-retry.%s.%s' % ( service_id, operation_model.name) responses = self._event_emitter.emit( event_name, response=response, endpoint=self, operation=operation_model, attempts=attempts, caught_exception=caught_exception, request_dict=request_dict) handler_response = first_non_none_response(responses) if handler_response is None: return False else: # Request needs to be retried, and we need to sleep # for the specified 
number of times. logger.debug("Response received to retry, sleeping for " "%s seconds", handler_response) time.sleep(handler_response) return True def _send(self, request): return self.http_session.send(request) class EndpointCreator(object): def __init__(self, event_emitter): self._event_emitter = event_emitter def create_endpoint(self, service_model, region_name, endpoint_url, verify=None, response_parser_factory=None, timeout=DEFAULT_TIMEOUT, max_pool_connections=MAX_POOL_CONNECTIONS, http_session_cls=URLLib3Session, proxies=None, socket_options=None, client_cert=None): if not is_valid_endpoint_url(endpoint_url): raise ValueError("Invalid endpoint: %s" % endpoint_url) if proxies is None: proxies = self._get_proxies(endpoint_url) endpoint_prefix = service_model.endpoint_prefix logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout) http_session = http_session_cls( timeout=timeout, proxies=proxies, verify=self._get_verify_value(verify), max_pool_connections=max_pool_connections, socket_options=socket_options, client_cert=client_cert, ) return Endpoint( endpoint_url, endpoint_prefix=endpoint_prefix, event_emitter=self._event_emitter, response_parser_factory=response_parser_factory, http_session=http_session ) def _get_proxies(self, url): # We could also support getting proxies from a config file, # but for now proxy support is taken from the environment. return get_environ_proxies(url) def _get_verify_value(self, verify): # This is to account for: # https://github.com/kennethreitz/requests/issues/1436 # where we need to honor REQUESTS_CA_BUNDLE because we're creating our # own request objects. # First, if verify is not None, then the user explicitly specified # a value so this automatically wins. if verify is not None: return verify # Otherwise use the value from REQUESTS_CA_BUNDLE, or default to # True if the env var does not exist. return os.environ.get('REQUESTS_CA_BUNDLE', True)
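The retry handling in _send_request follows a send / ask-handlers / resend loop. Below is a simplified, self-contained sketch of that control flow only; send_once and needs_retry are hypothetical stand-ins for Endpoint._get_response and the 'needs-retry' event handlers, not botocore APIs:

import time

_calls = {"n": 0}

def send_once():
    # Hypothetical stand-in for Endpoint._get_response: returns a
    # (response, exception) pair where exactly one side is None.
    _calls["n"] += 1
    if _calls["n"] < 3:
        return None, ConnectionError("transient failure #%d" % _calls["n"])
    return {"status_code": 200}, None

def needs_retry(attempts, response, exception):
    # Hypothetical stand-in for the 'needs-retry' event handlers:
    # return a sleep time in seconds, or None to stop retrying.
    if exception is not None and attempts < 5:
        return 0.01 * attempts
    return None

attempts = 1
response, exception = send_once()
while True:
    delay = needs_retry(attempts, response, exception)
    if delay is None:
        break
    time.sleep(delay)
    attempts += 1
    response, exception = send_once()

if exception is not None:
    raise exception
print(response, "after", attempts, "attempts")   # {'status_code': 200} after 3 attempts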
42.09816
81
0.6587
1,596
13,724
5.403509
0.216165
0.066558
0.021916
0.018669
0.237129
0.16744
0.11897
0.095895
0.072472
0.066327
0
0.004406
0.27237
13,724
325
82
42.227692
0.859203
0.20905
0
0.092511
0
0
0.058813
0.005686
0
0
0
0.003077
0
1
0.07489
false
0
0.057269
0.013216
0.22467
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a6dde8d68a99fd68fff6d0aa6d0f4f64dc22408
4,018
py
Python
backup/26.py
accordinglyto/dferte
d4b8449c1633973dc538c9e72aca5d37802a4ee4
[ "MIT" ]
null
null
null
backup/26.py
accordinglyto/dferte
d4b8449c1633973dc538c9e72aca5d37802a4ee4
[ "MIT" ]
8
2020-11-13T18:55:17.000Z
2022-03-12T00:34:40.000Z
backup/26.py
accordinglyto/dferte
d4b8449c1633973dc538c9e72aca5d37802a4ee4
[ "MIT" ]
null
null
null
from numpy import genfromtxt import matplotlib.pyplot as plt import mpl_finance import numpy as np import uuid import matplotlib # Input your csv file here with historical data ad = genfromtxt(f"../financial_data/SM.csv", delimiter=",", dtype=str) def convolve_sma(array, period): return np.convolve(array, np.ones((period,)) / period, mode="valid") def graphwerk(start, finish): open = [] high = [] low = [] close = [] volume = [] # decision = [] date = [] c_open = [] c_high = [] c_low = [] c_close = [] c_volume = [] c_date = [] c_start = start + 12 for x in range(finish - start): c_open.append(float(pd[c_start][1])) c_high.append(float(pd[c_start][2])) c_low.append(float(pd[c_start][3])) c_close.append(float(pd[c_start][4])) c_volume.append(float(pd[c_start][5])) c_date.append(pd[c_start][0]) c_start = c_start + 1 for x in range(finish - start): # Below filtering is valid for eurusd.csv file. Other financial data files have different orders so you need to find out # what means open, high and close in their respective order. open.append(float(pd[start][1])) high.append(float(pd[start][2])) low.append(float(pd[start][3])) close.append(float(pd[start][4])) volume.append(float(pd[start][5])) # decision.append(str(pd[start][6])) date.append(pd[start][0]) start = start + 1 decision = "sell" min_forecast = min(c_low) max_forecast = max(c_high) if close[-1] * 1.03 < max_forecast: decision = "buy" # for z in all_prices: # if close[-1] * 1.03 < z: # decision = "buy" sma = convolve_sma(close, 5) smb = list(sma) diff = sma[-1] - sma[-2] for x in range(len(close) - len(smb)): smb.append(smb[-1] + diff) fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor="w", edgecolor="k") dx = fig.add_subplot(111) # mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1) mpl_finance.candlestick2_ochl( dx, open, close, high, low, width=1.5, colorup="g", colordown="r", alpha=0.5 ) plt.autoscale() # plt.plot(smb, color="blue", linewidth=10, alpha=0.5) plt.axis("off") if decision == "sell": print("last value: " + str(close[-1])) print( "range of values in next 13 bars: " + str(min_forecast) + "-" + str(max_forecast) ) print("sell") plt.savefig(sell_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight") else: print("last value: " + str(close[-1])) print( "range of values in next 13 bars: " + str(min_forecast) + "-" + str(max_forecast) ) print("buy") plt.savefig(buy_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight") # if close[-1] >= close_next: # print('previous value is bigger') # print('last value: ' + str(close[-1])) # print('next value: ' + str(close_next)) # print('sell') # plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight') # else: # print('previous value is smaller') # print('last value: '+ str(close[-1])) # print('next value: ' + str(close_next)) # print('buy') # plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight') # plt.show() open.clear() close.clear() volume.clear() high.clear() low.clear() plt.cla() plt.clf() # output = [] # with open("STOCKbluechip.csv") as f: # output = [str(s) for line in f.readlines() for s in line[:-1].split(",")] # for stock in output: pd = ad buy_dir = "../data/train/buy/" sell_dir = "../data/train/sell/" iter = 0 for x in range(len(pd)): graphwerk(iter, iter + 12) iter = iter + 2
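The convolve_sma helper above is a uniform-window moving average; here it is exercised on a short made-up price series so the windowing behaviour is visible:

import numpy as np

def convolve_sma(array, period):
    # Same helper as above: an n-period simple moving average computed
    # as a convolution with a uniform window.
    return np.convolve(array, np.ones((period,)) / period, mode="valid")

prices = [10.0, 11.0, 12.0, 13.0, 14.0, 15.0]
print(convolve_sma(prices, 3))   # [11. 12. 13. 14.]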
28.097902
128
0.558238
553
4,018
3.960217
0.292948
0.050228
0.059361
0.031963
0.311416
0.245205
0.225114
0.225114
0.225114
0.225114
0
0.02308
0.277501
4,018
142
129
28.295775
0.731312
0.288203
0
0.155556
0
0
0.071328
0.008475
0
0
0
0
0
1
0.022222
false
0
0.066667
0.011111
0.1
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a6e5f7f79247bb69a8f187a793986d06aaf806b
3,091
py
Python
streams/readers/arff_reader.py
JanSurft/tornado
2c07686c5358d2bcb15d6edac3126ad9346c3c76
[ "MIT" ]
103
2017-10-01T20:24:58.000Z
2022-03-16T09:09:10.000Z
streams/readers/arff_reader.py
JanSurft/tornado
2c07686c5358d2bcb15d6edac3126ad9346c3c76
[ "MIT" ]
2
2019-09-17T11:06:26.000Z
2021-11-08T23:57:46.000Z
streams/readers/arff_reader.py
JanSurft/tornado
2c07686c5358d2bcb15d6edac3126ad9346c3c76
[ "MIT" ]
28
2018-12-18T00:43:10.000Z
2022-03-04T08:39:47.000Z
""" The Tornado Framework By Ali Pesaranghader University of Ottawa, Ontario, Canada E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com """ import re from data_structures.attribute import Attribute from dictionary.tornado_dictionary import TornadoDic class ARFFReader: """This class is used to read a .arff file.""" @staticmethod def read(file_path): labels = [] attributes = [] attributes_min_max = [] records = [] data_flag = False reader = open(file_path, "r") for line in reader: if line.strip() == '': continue if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"): line = line.strip('\n\r\t') line = line.split(' ') attribute_name = line[1] attribute_value_range = line[2] attribute = Attribute() attribute.set_name(attribute_name) if attribute_value_range.lower() in ['numeric', 'real', 'integer']: attribute_type = TornadoDic.NUMERIC_ATTRIBUTE attribute_value_range = [] attributes_min_max.append([0, 0]) else: attribute_type = TornadoDic.NOMINAL_ATTRIBUTE attribute_value_range = attribute_value_range.strip('{}').replace("'", "") attribute_value_range = attribute_value_range.split(',') attributes_min_max.append([None, None]) attribute.set_type(attribute_type) attribute.set_possible_values(attribute_value_range) attributes.append(attribute) elif line.startswith("@data") or line.startswith("@DATA"): data_flag = True labels = attributes[len(attributes) - 1].POSSIBLE_VALUES attributes.pop(len(attributes) - 1) continue elif data_flag is True: line = re.sub('\s+', '', line) elements = line.split(',') for i in range(0, len(elements) - 1): if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE: elements[i] = float(elements[i]) min_value = attributes_min_max[i][0] max_value = attributes_min_max[i][1] if elements[i] < min_value: min_value = elements[i] elif elements[i] > max_value: max_value = elements[i] attributes_min_max[i] = [min_value, max_value] records.append(elements) for i in range(0, len(attributes)): if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE: attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1]) return labels, attributes, records
38.6375
100
0.525396
306
3,091
5.101307
0.303922
0.066624
0.081999
0.054452
0.175529
0.122998
0.055093
0
0
0
0
0.00676
0.377871
3,091
79
101
39.126582
0.804992
0.061792
0
0.070175
0
0
0.022792
0
0
0
0
0
0
1
0.017544
false
0
0.052632
0
0.105263
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a703f7100900fb7196e6525d9f4720fdc63dbae
11,514
py
Python
CarModel.py
JaredFG/Multiagentes-Unity
37f7ec5c0588865ef08b50df83566a43d817bebf
[ "MIT" ]
null
null
null
CarModel.py
JaredFG/Multiagentes-Unity
37f7ec5c0588865ef08b50df83566a43d817bebf
[ "MIT" ]
null
null
null
CarModel.py
JaredFG/Multiagentes-Unity
37f7ec5c0588865ef08b50df83566a43d817bebf
[ "MIT" ]
1
2022-02-10T20:33:44.000Z
2022-02-10T20:33:44.000Z
''' Autores:Eduardo Rodríguez López A01749381 Rebeca Rojas Pérez A01751192 Jared Abraham Flores Guarneros A01379868 Eduardo Aguilar Chías A01749375 ''' from random import random from mesa.visualization.modules import CanvasGrid from mesa.visualization.ModularVisualization import ModularServer from mesa.batchrunner import BatchRunner from mesa.datacollection import DataCollector from mesa.space import MultiGrid from mesa import Agent , Model from mesa.time import RandomActivation #Clase para crear a los agentes automóviles class CarAgent(Agent): def __init__(self, unique_id, model): super().__init__(unique_id, model) self.next_cell = None self.direction = None self.agent_type = 0 #Función para validar si la posición es válida, en caso de que sea válida regresa True, en caso contrario #regresa False. def is_valid(self, position): if position[0] < self.model.width and position[1] < self.model.height and position[0] >= 0 and position[1] >= 0: if not self.model.grid.is_cell_empty(position): return True return False #Función para recibir las posibles celdas a dónde moverse, regresa la posición de la calle. def get_poss_cell(self): neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False) for cell in neighborhood: for agent in self.model.grid.get_cell_list_contents(cell): if agent.agent_type == 2: next_dir = (self.pos[0] - agent.pos[0], self.pos[1] - agent.pos[1]) if next_dir[0] * -1 != self.direction[0] and next_dir[1] * -1 != self.direction[1]: return agent.pos #Función para avanzar hacia el frente, regresa el valor de la variable move que son coordenadas. def get_nextcell(self): move = (self.pos[0] + self.direction[0], self.pos[1] + self.direction[1]) return move #Función para obtener la dirección hacia donde debe moverse el automóvil, regresa la dirección # de la calle. def get_nextdirect(self, position): for agent in self.model.grid.get_cell_list_contents(position): if agent.agent_type == 2: return agent.direction #Función para dar vuelta, regresa la dirección de la calle. def turn(self): for cell in self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False): for agent in self.model.grid.get_cell_list_contents(cell): if agent.agent_type == 2: if agent.direction != self.direction: return agent.direction return None #Función para revisar la luz de los semáforos, regresa la luz del semáforo en caso # de que el automóvil tenga uno de vecino. En caso contrario regresa True. def check_light(self): for agent in self.model.grid.get_cell_list_contents(self.next_cell): if agent.agent_type == 1: return agent.light return True #Función para checar si hay otro automovil enfrente, regresa un valor booleano. def check_car(self): for agent in self.model.grid.get_cell_list_contents(self.next_cell): if agent.agent_type == 0: return False return True def step(self): #Variable para guardar el resultado de la función get_nextcell(). next_cell = self.get_nextcell() #Condición, si la siguiente celda es válida, se guarda en el automóvil y se cambia su dirección. if self.is_valid(next_cell): self.next_cell = next_cell self.direction = self.get_nextdirect(self.next_cell) #En caso contrario una varible guarda el resultado de la función turn(). else: direction = self.turn() #Condición, si la variable direction es verdadera se cambia la dirección del automóvil. if direction: self.direction = direction #En caso contrario una variable guarda el resultado de la función get_poss_cell(). #La siguiente celda del automóvil cambia al valor de la variable. 
else: poss = self.get_poss_cell() self.next_cell = poss if self.check_car(): if self.check_light(): self.model.grid.move_agent(self, self.next_cell) #Clase para crear a los agentes semáforos. class TrafficLightAgent(Agent): def __init__(self, unique_id, model): super().__init__(unique_id, model) self.agent_type = 1 self.light = False #Función para cambiar la luz de los semáforos. def change(self): self.light = not self.light #Función para contar el número de automóviles que hay en un semáforo, # regresa el contador con el número de automóviles. def count_cars(self): counter = 0 neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=True) for cell in neighborhood: for agent in self.model.grid.get_cell_list_contents(cell): if agent.agent_type == 0: counter += 1 return counter #Clase para crear a los agentes calle. class StreetAgent(Agent): def __init__(self, unique_id, model): super().__init__(unique_id, model) self.direction = None self.agent_type = 2 #Clase para crear el modelo. class CarModel(Model): def __init__(self, N: int, width: int, height: int): self.num_agents = N self.running = True self.grid = MultiGrid(width, height, False) self.schedule = RandomActivation(self) self.uids = 0 self.lights_ids = 0 self.width = width self.height = height street_pos = [] self.lights = 4 #Loop para crear la parte interior de las calles, donde está el cruce. for row in range(height): for col in range(width): agent = StreetAgent(self.uids, self) self.uids += 1 flag = True if col > width // 2 - 2 and col < width // 2 + 1 and col > 1 and col < height - 1: if row >= height // 2: agent.direction = (0, 1) else: agent.direction = (0, -1) elif row > height // 2 - 2 and row < height // 2 + 1 and row > 1 and row < width - 1: if col > width // 2: agent.direction = (-1, 0) else: agent.direction = (1, 0) else: flag = False if flag: self.grid.place_agent(agent, (col, row)) street_pos.append((col, row)) #Loop para crear la parte exterior de las calles, donde NO está el cruce. for row in range(height): for col in range(width): agent = StreetAgent(self.uids, self) self.uids += 1 flag = True if row < 2: if col < width - 2: agent.direction = (1, 0) else: agent.direction = (0, 1) elif row >= 2 and row < height - 2: if col < 2: agent.direction = (0, -1) elif col >= width - 2 and col < width: agent.direction = (0, 1) else: flag = False elif row >= height -2 and row < height: if col < width - 2: agent.direction = (-1, 0) else: agent.direction = (0, 1) else: flag = False if flag: self.grid.place_agent(agent, (col, row)) street_pos.append((col, row)) #Loop para crear los automóviles en posiciones random donde hay calle. for i in range(self.num_agents): a = CarAgent(self.uids, self) self.uids += 1 pos_index = self.random.randint(0, len(street_pos) - 1) pos = street_pos.pop(pos_index) a.direction = self.grid.get_cell_list_contents(pos)[0].direction self.grid.place_agent(a, pos) self.schedule.add(a) #Crear los semáforos for i in range(self.lights): alight = TrafficLightAgent(self.lights_ids, self) self.lights_ids += 1 self.schedule.add(alight) x = 8 y = 9 if i == 0: alight.light = True self.grid.place_agent(alight, (x, y)) elif i == 1: x = 8 y = 10 alight.light = True self.grid.place_agent(alight, (x, y)) elif i == 2: x = 11 y = 9 alight.light = False self.grid.place_agent(alight, (x, y)) else: x = 11 y = 10 alight.light = False self.grid.place_agent(alight, (x, y)) def step(self): #Contadores para saber cuáles semáforos tienen más automóviles. 
count_left = 0 count_right = 0 #Loop para añadir a los contadores la cantidad de automóviles que hay en cada lado. for agent in self.schedule.agents: if agent.agent_type == 1: if agent.unique_id == 0: count_left += agent.count_cars() elif agent.unique_id == 1: count_left += agent.count_cars() elif agent.unique_id == 2: count_right += agent.count_cars() elif agent.unique_id == 3: count_right += agent.count_cars() #Condición, si el lado izquierdo tiene más automóviles, los semáforos del lado izquierdo #dan luz verde y los semáforos del lado derecho dan luz roja. if count_left >= count_right: for agent in self.schedule.agents: if agent.agent_type == 1: if agent.unique_id == 0: agent.light = True elif agent.unique_id == 1: agent.light = True elif agent.unique_id == 2: agent.light = False else: agent.light = False #En caso contrario los semáforos del lado derecho dan luz verde y los semáforos del lado #izquierdo dan luz roja. else: for agent in self.schedule.agents: if agent.agent_type == 1: if agent.unique_id == 0: agent.light = False elif agent.unique_id == 1: agent.light = False elif agent.unique_id == 2: agent.light = True else: agent.light = True self.schedule.step()
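A minimal driver for the model above, as a sketch only: it assumes mesa is installed and that CarModel and the agent classes are defined in the current module.

model = CarModel(N=10, width=20, height=20)

# Cars and traffic lights are the only agents placed on the scheduler;
# street cells are grid-only, so the counts are 10 and 4 respectively.
cars = [a for a in model.schedule.agents if a.agent_type == 0]
lights = [a for a in model.schedule.agents if a.agent_type == 1]
print(len(cars), len(lights))   # 10 4

# Advance the simulation; each step also re-balances the lights toward
# the side currently holding more cars.
for _ in range(5):
    model.step()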
41.566787
120
0.538822
1,378
11,514
4.394775
0.164731
0.021136
0.023613
0.023778
0.454921
0.398118
0.358818
0.298382
0.288144
0.274934
0
0.020489
0.385357
11,514
277
121
41.566787
0.835241
0.212263
0
0.553991
0
0
0
0
0
0
0
0
0
1
0.070423
false
0
0.037559
0
0.183099
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a705dbb2cd1b609cc2090d60bc5b82810db8095
1,684
py
Python
qcodes/widgets/display.py
nulinspiratie/Qcodes
d050d38ac83f532523a39549c3247dfa6096a36e
[ "MIT" ]
2
2017-02-27T06:02:39.000Z
2019-06-03T04:56:59.000Z
qcodes/widgets/display.py
nulinspiratie/Qcodes
d050d38ac83f532523a39549c3247dfa6096a36e
[ "MIT" ]
50
2017-04-12T04:03:15.000Z
2022-03-09T00:41:43.000Z
qcodes/widgets/display.py
nulinspiratie/Qcodes
d050d38ac83f532523a39549c3247dfa6096a36e
[ "MIT" ]
null
null
null
"""Helper for adding content stored in a file to a jupyter notebook.""" import os from pkg_resources import resource_string from IPython.display import display, Javascript, HTML # Originally I implemented this using regular open() and read(), so it # could use relative paths from the importing file. # # But for distributable packages, pkg_resources.resource_string is the # best way to load data files, because it works even if the package is # in an egg or zip file. See: # http://pythonhosted.org/setuptools/setuptools.html#accessing-data-files-at-runtime def display_auto(qcodes_path, file_type=None): """ Display some javascript, css, or html content in a jupyter notebook. Content comes from a package-relative file path. Will use the file extension to determine file type unless overridden by file_type Args: qcodes_path (str): the path to the target file within the qcodes package, like 'widgets/widgets.js' file_type (Optional[str]): Override the file extension to determine what type of file this is. Case insensitive, supported values are 'js', 'css', and 'html' """ contents = resource_string('qcodes', qcodes_path).decode('utf-8') if file_type is None: ext = os.path.splitext(qcodes_path)[1].lower() elif 'js' in file_type.lower(): ext = '.js' elif 'css' in file_type.lower(): ext = '.css' else: ext = '.html' if ext == '.js': display(Javascript(contents)) elif ext == '.css': display(HTML('<style>' + contents + '</style>')) else: # default to html. Anything else? display(HTML(contents))
35.083333
84
0.672803
236
1,684
4.733051
0.461864
0.050134
0.028648
0.032229
0.080573
0
0
0
0
0
0
0.001538
0.228029
1,684
47
85
35.829787
0.857692
0.572447
0
0.105263
0
0
0.075873
0
0
0
0
0
0
1
0.052632
false
0
0.157895
0
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a71f08eeecbd606e19448cf8f9c90856e40cbac
6,697
py
Python
hubcontrol.py
smr99/lego-hub-tk
d3b86847873fa80deebf993ccd44b4d3d8f9bf40
[ "MIT" ]
16
2021-02-17T01:59:39.000Z
2022-03-29T05:10:12.000Z
hubcontrol.py
smr99/lego-hub-tk
d3b86847873fa80deebf993ccd44b4d3d8f9bf40
[ "MIT" ]
15
2021-04-20T04:01:36.000Z
2022-02-01T02:46:30.000Z
hubcontrol.py
smr99/lego-hub-tk
d3b86847873fa80deebf993ccd44b4d3d8f9bf40
[ "MIT" ]
9
2021-04-18T20:29:21.000Z
2022-03-31T11:50:04.000Z
#! /usr/bin/python3 import base64 from data.ProgramHubLogger import ProgramHubLogger from datetime import datetime import logging import os import sys from ui.MotionSensor import MotionSensorWidget from ui.PositionStatus import PositionStatusWidget from ui.DevicePortWidget import DevicePortWidget from ui.ConnectionWidget import ConnectionWidget from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtWidgets import QApplication, QPushButton, QWidget from comm.HubClient import ConnectionState, HubClient from data.HubMonitor import HubMonitor from data.HubStatus import HubStatus from ui.DeviceStatusWidget import DeviceStatusWidget from utils.setup import setup_logging logger = logging.getLogger("App") log_filename = os.path.dirname(__file__) + "/logs/hubcontrol.log" setup_logging(log_filename) def list_programs(info): storage = info['storage'] slots = info['slots'] print("%4s %-40s %6s %-20s %-12s %-10s" % ("Slot", "Decoded Name", "Size", "Last Modified", "Project_id", "Type")) for i in range(20): if str(i) in slots: sl = slots[str(i)] modified = datetime.utcfromtimestamp(sl['modified']/1000).strftime('%Y-%m-%d %H:%M:%S') try: decoded_name = base64.b64decode(sl['name']).decode('utf-8') except: decoded_name = sl['name'] try: project = sl['project_id'] except: project = " " try: type = sl['type'] except: type = " " print("%4s %-40s %5db %-20s %-12s %-10s" % (i, decoded_name, sl['size'], modified, project, type)) print(("Storage free %s%s of total %s%s" % (storage['free'], storage['unit'], storage['total'], storage['unit']))) class ConsoleWidget(QTextEdit): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.setReadOnly(True) self.setLineWrapMode(QTextEdit.NoWrap) def append(self, text): self.moveCursor(QTextCursor.End) self.insertPlainText(text) sb = self.verticalScrollBar() sb.setValue(sb.maximum()) def append_line(self, text): self.append(text + '\n') class ProgramWidget(QWidget): def __init__(self, hub_client : HubClient, hub_monitor : HubMonitor, *args, **kwargs): super().__init__(*args, **kwargs) self._client = hub_client self._monitor = hub_monitor self._executing_program_label = QLabel() self._slot_spinbox = QSpinBox() self._run_button = QPushButton('Run') self._run_button.clicked.connect(self.run_program) self._stop_button = QPushButton('Stop') self._stop_button.clicked.connect(self.stop_program) runstop_widget = QWidget() layout = QHBoxLayout(runstop_widget) layout.addWidget(QLabel('Slot:')) layout.addWidget(self._slot_spinbox) layout.addWidget(self._run_button) layout.addWidget(self._stop_button) box = QGroupBox('Program Execution') layout = QFormLayout(box) layout.addRow('Executing Program ID:', self._executing_program_label) layout.addRow(runstop_widget) layout = QVBoxLayout() layout.addWidget(box) self.setLayout(layout) def refresh(self): is_connected = self._client.state == ConnectionState.TELEMETRY self._executing_program_label.setText(self._monitor.execution_status[0]) self._run_button.setEnabled(is_connected) self._stop_button.setEnabled(is_connected) def run_program(self): slot = self._slot_spinbox.value() r = self._client.program_execute(slot) logger.debug('Program execute returns: %s', r) def stop_program(self): r = self._client.program_terminate() logger.debug('Program terminate returns: %s', r) class MainWindow(QMainWindow): def __init__(self, hub_client, hub_monitor, *args, **kwargs): super().__init__(*args, **kwargs) status = hub_monitor.status self._client = 
hub_client self._hub_monitor = hub_monitor self.position_widget = PositionStatusWidget(status) self.motion_widget = MotionSensorWidget(status) self.program_widget = ProgramWidget(hub_client, hub_monitor) self.port_widget = DevicePortWidget(status) self.console = ConsoleWidget() self.list_button = QPushButton('List') self.list_button.clicked.connect(self.list_programs) # Top row (status) top_box = QWidget() layout = QHBoxLayout(top_box) layout.addWidget(ConnectionWidget(hub_client)) layout.addWidget(self.position_widget) layout.addWidget(self.motion_widget) # Button bar buttons = QWidget() layout = QHBoxLayout(buttons) layout.addWidget(self.list_button) mw = QWidget() layout = QVBoxLayout(mw) layout.addWidget(top_box) layout.addWidget(buttons) layout.addWidget(self.program_widget) layout.addWidget(self.port_widget) layout.addWidget(self.console) self.setCentralWidget(mw) hub_monitor.events.console_print += self.console.append # Timer refresh trick from https://github.com/Taar2/pyqt5-modelview-tutorial/blob/master/modelview_3.py # this trick is used to work around the issue of updating UI from background threads -- i.e. events # raised by HubClient. timer = QtCore.QTimer(self) timer.setInterval(200) timer.timeout.connect(self.refresh) timer.start() def refresh(self): is_connected = self._client.state == ConnectionState.TELEMETRY is_connected_usb = is_connected and self._hub_monitor.status.is_usb_connected self.list_button.setEnabled(is_connected_usb) self.position_widget.refresh() self.motion_widget.refresh() self.port_widget.refresh() self.program_widget.refresh() def list_programs(self): storage_status = self._client.get_storage_status() if storage_status is not None: list_programs(storage_status) def run_program(self): slot = 4 r = self._client.program_execute(slot) print('Program execute returns: ', r) logger.info("LEGO status app starting up") hc = HubClient() monitor = HubMonitor(hc) monitor.logger = ProgramHubLogger('logs/program') app = QApplication(sys.argv) window = MainWindow(hc, monitor) window.setWindowTitle('LEGO Hub Status') window.show() hc.start() sys.exit(app.exec_())
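The slot listing above decodes base64 program names and millisecond timestamps; the same decoding in isolation, on a made-up slot entry:

import base64
from datetime import datetime

# Values below are made up; the decoding mirrors list_programs() above.
slot = {'name': 'aGVsbG8gd29ybGQ=', 'modified': 1613500000000, 'size': 1234}

decoded_name = base64.b64decode(slot['name']).decode('utf-8')   # 'hello world'
modified = datetime.utcfromtimestamp(slot['modified'] / 1000).strftime('%Y-%m-%d %H:%M:%S')
print(decoded_name, modified, slot['size'])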
33.823232
119
0.667164
768
6,697
5.622396
0.283854
0.048634
0.039602
0.013201
0.106299
0.065076
0.044928
0.029643
0.029643
0.029643
0
0.009049
0.224429
6,697
197
120
33.994924
0.822295
0.039869
0
0.125
0
0
0.068804
0
0
0
0
0
0
1
0.078947
false
0
0.131579
0
0.230263
0.032895
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a72d886218147f91e76b4f7f571b23929432026
966
py
Python
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
samysweb/dnnv
58fb95b7300914d9da28eed86c39eca473b1aaef
[ "MIT" ]
5
2022-01-28T20:30:34.000Z
2022-03-17T09:26:52.000Z
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
samysweb/dnnv
58fb95b7300914d9da28eed86c39eca473b1aaef
[ "MIT" ]
9
2022-01-27T03:50:28.000Z
2022-02-08T18:42:17.000Z
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
samysweb/dnnv
58fb95b7300914d9da28eed86c39eca473b1aaef
[ "MIT" ]
2
2022-02-03T17:32:43.000Z
2022-03-24T16:38:49.000Z
import numpy as np

from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *

TOL = 1e-6


def test_Dropout_consts():
    x = np.array([3, 4]).astype(np.float32)
    op = Dropout(x)
    tf_op = TensorflowConverter().visit(op)
    result_ = tf_op()
    assert isinstance(result_, tuple)
    assert len(result_) == 2
    result, none = result_
    assert none is None
    y = np.array([3, 4]).astype(np.float32)
    assert np.all(result >= (y - TOL))
    assert np.all(result <= (y + TOL))


def test_Dropout_x_is_op():
    x = np.array([3, 4]).astype(np.float32)
    input_op = Input((2,), np.dtype(np.float32))
    op = Dropout(input_op)
    tf_op = TensorflowConverter().visit(op)
    result_ = tf_op(x)
    assert isinstance(result_, tuple)
    assert len(result_) == 2
    result, none = result_
    assert none is None
    y = np.array([3, 4]).astype(np.float32)
    assert np.all(result >= (y - TOL))
    assert np.all(result <= (y + TOL))
26.108108
48
0.63354
146
966
4.054795
0.267123
0.076014
0.054054
0.060811
0.699324
0.699324
0.699324
0.699324
0.47973
0.47973
0
0.030423
0.217391
966
36
49
26.833333
0.752646
0
0
0.62069
0
0
0
0
0
0
0
0
0.344828
1
0.068966
false
0
0.103448
0
0.172414
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7328e83cbca070a32d28d91e1af148c593184e
4,202
py
Python
smarts/zoo/worker.py
idsc-frazzoli/SMARTS
bae0a6ea160330921edc94a7161a4e8cf72a1974
[ "MIT" ]
554
2020-10-16T02:30:35.000Z
2022-03-29T14:13:00.000Z
smarts/zoo/worker.py
idsc-frazzoli/SMARTS
bae0a6ea160330921edc94a7161a4e8cf72a1974
[ "MIT" ]
917
2020-10-17T00:10:31.000Z
2022-03-31T23:00:47.000Z
smarts/zoo/worker.py
idsc-frazzoli/SMARTS
bae0a6ea160330921edc94a7161a4e8cf72a1974
[ "MIT" ]
135
2020-10-20T01:44:49.000Z
2022-03-27T04:51:31.000Z
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Run an agent in it's own (independent) process. What Agent code does is out of our direct control, we want to avoid any interactions with global state that might be present in the SMARTS process. To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their fresh and independent python process. This script is called from within SMARTS to instantiate a remote agent. The protocal is as follows: 1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent 2. worker.py will begin listening on port 5467. 3. SMARTS connects to (ip, 5467) as a client. 4. SMARTS calls `build()` rpc with `AgentSpec` as input. 5. worker.py recieves the `AgentSpec` instances and builds the Agent. 6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py. """ import argparse import importlib import logging import os import signal import sys from concurrent import futures import grpc from smarts.zoo import worker_pb2_grpc, worker_servicer # Front-load some expensive imports as to not block the simulation modules = [ "smarts.core.utils.pybullet", "smarts.core.utils.sumo", "smarts.core.sumo_road_network", "numpy", "sklearn", "shapely", "scipy", "trimesh", "panda3d", "gym", "ray", ] for mod in modules: try: importlib.import_module(mod) except ImportError: if mod == "ray": print( "You need to install the ray dependency using pip install -e .[train] first" ) if mod == "panda3d": print( "You need to install the panda3d dependency using pip install -e .[camera-obs] first" ) pass # End front-loaded imports logging.basicConfig(level=logging.INFO) log = logging.getLogger(f"worker.py - pid({os.getpid()})") def serve(port): ip = "[::]" server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) worker_pb2_grpc.add_WorkerServicer_to_server( worker_servicer.WorkerServicer(), server ) server.add_insecure_port(f"{ip}:{port}") server.start() log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.") def stop_server(unused_signum, unused_frame): server.stop(0) log.debug( f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Received interrupt signal." 
) # Catch keyboard interrupt and terminate signal signal.signal(signal.SIGINT, stop_server) signal.signal(signal.SIGTERM, stop_server) # Wait to receive server termination signal server.wait_for_termination() log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Server exited") if __name__ == "__main__": parser = argparse.ArgumentParser("Run an agent in an independent process.") parser.add_argument( "--port", type=int, required=True, help="Port to listen for remote client connections.", ) args = parser.parse_args() serve(args.port)
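The front-loaded import block above is a general pattern: import expensive or optional modules once, up front, and degrade gracefully if one is missing. A standalone sketch of the same idea with example module names:

import importlib

# Module names here are only examples; the point is the pattern.
optional_modules = ["numpy", "scipy", "some_optional_extra"]

for mod in optional_modules:
    try:
        importlib.import_module(mod)
    except ImportError:
        print("optional dependency '%s' is not available" % mod)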
34.442623
155
0.702285
592
4,202
4.930743
0.466216
0.030147
0.015074
0.015416
0.073313
0.055498
0.039054
0.039054
0.039054
0.039054
0
0.008701
0.206806
4,202
121
156
34.727273
0.867087
0.505474
0
0.030769
0
0
0.314941
0.037598
0
0
0
0
0
1
0.030769
false
0.015385
0.169231
0
0.2
0.030769
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a744ccf0662773e4f40dc632d9bd05f720ada5c
2,846
py
Python
week2/problems/problem2.py
Nburkhal/mit-cs250
a3d32a217deb2cfa1b94d8188bef73c0742b1245
[ "MIT" ]
null
null
null
week2/problems/problem2.py
Nburkhal/mit-cs250
a3d32a217deb2cfa1b94d8188bef73c0742b1245
[ "MIT" ]
null
null
null
week2/problems/problem2.py
Nburkhal/mit-cs250
a3d32a217deb2cfa1b94d8188bef73c0742b1245
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Now write a program that calculates the minimum fixed monthly payment needed in order pay off a credit card balance within 12 months. By a fixed monthly payment, we mean a single number which does not change each month, but instead is a constant amount that will be paid each month. In this problem, we will not be dealing with a minimum monthly payment rate. The following variables contain values as described below: balance - the outstanding balance on the credit card annualInterestRate - annual interest rate as a decimal The program should print out one line: the lowest monthly payment that will pay off all debt in under 1 year, for example: Lowest Payment: 180 Assume that the interest is compounded monthly according to the balance at the end of the month (after the payment for that month is made). The monthly payment must be a multiple of $10 and is the same for all months. Notice that it is possible for the balance to become negative using this payment scheme, which is okay. A summary of the required math is found below: Monthly interest rate = (Annual interest rate) / 12.0 Monthly unpaid balance = (Previous balance) - (Minimum fixed monthly payment) Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance) Test Case 1: balance = 3329 annualInterestRate = 0.2 Result Your Code Should Generate: ------------------- Lowest Payment: 310 Test Case 2: balance = 4773 annualInterestRate = 0.2 Result Your Code Should Generate: ------------------- Lowest Payment: 440 Test Case 3: balance = 3926 annualInterestRate = 0.2 Result Your Code Should Generate: ------------------- Lowest Payment: 360 """ # Establish variables that we know / needed for the evaluation. # Counter optional balance = 3329 annualInterestRate = 0.2 monthlyInterestRate = annualInterestRate / 12 monthlyPayment = 0 updatedBalance = balance counter = 0 # Will loop through everything until we find a rate that will reduce updatedBalance to 0. while updatedBalance > 0: # Was stated that payments needed to happen in increments of $10 monthlyPayment += 10 # To reset balance back to actual balance when loop inevitably fails. updatedBalance = balance month = 1 # For 12 months and while balance is not 0... while month <= 12 and updatedBalance > 0: # Subtract the ($10*n) amount updatedBalance -= monthlyPayment # Compound the interest AFTER making monthly payment interest = monthlyInterestRate * updatedBalance updatedBalance += interest # Increase month counter month += 1 counter += 1 print("Lowest Payment: ", monthlyPayment) print("Number of iterations: ", counter)
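One month of the update rule spelled out with Test Case 1's numbers, to make the formulas concrete:

balance = 3329
annualInterestRate = 0.2
monthlyPayment = 310                      # the documented answer for this case

monthlyInterestRate = annualInterestRate / 12.0
unpaidBalance = balance - monthlyPayment                        # 3019.0
updatedBalance = unpaidBalance + monthlyInterestRate * unpaidBalance
print(round(updatedBalance, 2))                                 # 3069.32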
37.447368
104
0.713282
389
2,846
5.218509
0.403599
0.048276
0.039409
0.038424
0.110837
0.090148
0.090148
0.090148
0.090148
0.090148
0
0.031475
0.218552
2,846
75
105
37.946667
0.881295
0.784961
0
0.111111
0
0
0.063228
0
0
0
0
0
0
1
0
false
0
0
0
0
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a74aa2ca33901c3b2b2f9d3fd978d06054719fb
2,410
py
Python
leetcode_python/Sort/sort-characters-by-frequency.py
yennanliu/CS_basics
3c50c819897a572ff38179bfb0083a19b2325fde
[ "Unlicense" ]
18
2019-08-01T07:45:02.000Z
2022-03-31T18:05:44.000Z
leetcode_python/Sort/sort-characters-by-frequency.py
yennanliu/CS_basics
3c50c819897a572ff38179bfb0083a19b2325fde
[ "Unlicense" ]
null
null
null
leetcode_python/Sort/sort-characters-by-frequency.py
yennanliu/CS_basics
3c50c819897a572ff38179bfb0083a19b2325fde
[ "Unlicense" ]
15
2019-12-29T08:46:20.000Z
2022-03-08T14:14:05.000Z
# V0 import collections class Solution(object): def frequencySort(self, s): count = collections.Counter(s) count_dict = dict(count) count_tuple_sorted = sorted(count_dict.items(), key=lambda kv : -kv[1]) res = '' for item in count_tuple_sorted: res += item[0] * item[1] return res # V0' # IDEA : collections.Counter(s).most_common class Solution(object): def frequencySort(self, s): return ''.join(c * t for c, t in collections.Counter(s).most_common()) # V1 # IDEA : SORT # https://blog.csdn.net/fuxuemingzhu/article/details/79437548 import collections class Solution(object): def frequencySort(self, s): """ :type s: str :rtype: str """ count = collections.Counter(s).most_common() res = '' for c, v in count: res += c * v return res ### Test case: s=Solution() assert s.frequencySort(['a','b','c','c']) == 'ccab' assert s.frequencySort(['a']) == 'a' assert s.frequencySort(['a','A','c','c']) == 'ccaA' assert s.frequencySort(['c','c','c']) == 'ccc' assert s.frequencySort([]) == '' assert s.frequencySort(['','','']) == '' # V1' # http://bookshadow.com/weblog/2016/11/02/leetcode-sort-characters-by-frequency/ class Solution(object): def frequencySort(self, s): """ :type s: str :rtype: str """ return ''.join(c * t for c, t in collections.Counter(s).most_common()) # V2 import collections class Solution(object): def frequencySort(self, s): # sort Counter by value # https://stackoverflow.com/questions/20950650/how-to-sort-counter-by-value-python s_freq_dict = collections.Counter(s).most_common() output = '' for i in range(len(s_freq_dict)): output = output + (s_freq_dict[i][0]*s_freq_dict[i][1]) return output # V2' # Time: O(n) # Space: O(n) import collections class Solution(object): def frequencySort(self, s): """ :type s: str :rtype: str """ freq = collections.defaultdict(int) for c in s: freq[c] += 1 counts = [""] * (len(s)+1) for c in freq: counts[freq[c]] += c result = "" for count in reversed(range(len(counts)-1)): for c in counts[count]: result += c * count return result
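Every variant above leans on the same property of collections.Counter; a quick illustration of the ordering most_common() returns:

import collections

counts = collections.Counter("tree").most_common()
print(counts)                                   # [('e', 2), ('t', 1), ('r', 1)]

# Ties keep first-seen order because most_common() uses a stable sort
# (a CPython detail the test cases above also rely on).
print(''.join(c * t for c, t in counts))        # 'eetr'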
27.078652
87
0.568465
309
2,410
4.372168
0.281553
0.057735
0.084382
0.097705
0.431532
0.334567
0.334567
0.304959
0.304959
0.220577
0
0.021702
0.273444
2,410
88
88
27.386364
0.749857
0.178838
0
0.415094
0
0
0.012841
0
0
0
0
0
0.113208
1
0.113208
false
0
0.075472
0.018868
0.415094
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7a250cd753510b2923ce0ec46a2aae0ee1d50c
1,028
py
Python
scraper/news/spiders/millardayo.py
ZendaInnocent/news-api
71465aea50e0b1cea08a421d72156cbe7ed8a952
[ "Apache-2.0" ]
3
2021-11-15T08:43:53.000Z
2021-11-15T19:44:56.000Z
scraper/news/spiders/millardayo.py
ZendaInnocent/news-api
71465aea50e0b1cea08a421d72156cbe7ed8a952
[ "Apache-2.0" ]
null
null
null
scraper/news/spiders/millardayo.py
ZendaInnocent/news-api
71465aea50e0b1cea08a421d72156cbe7ed8a952
[ "Apache-2.0" ]
1
2021-11-15T08:43:58.000Z
2021-11-15T08:43:58.000Z
# Spider for MillardAyo.com
import scrapy
from bs4 import BeautifulSoup


class MillardAyoSpider(scrapy.Spider):
    name = 'millardayo'
    allowed_urls = ['www.millardayo.com']
    start_urls = [
        'https://millardayo.com',
    ]

    def parse(self, response, **kwargs):
        # extracting data - link, image, title, excerpt
        soup = BeautifulSoup(response.body, 'lxml')
        posts = soup.find_all('li', {'class': 'infinite-post'})

        for post in posts:
            try:
                yield {
                    'image_url': post.find('img').get('src'),
                    'link': post.find('a').get('href'),
                    'title': post.find('a').get('title'),
                    'excerpt': post.find('p').get_text(),
                    'source': 'millardayo',
                }
            except AttributeError:
                pass

        next_page = soup.find('a', text='Next ›').get('href')
        if next_page:
            yield response.follow(next_page, callback=self.parse)
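The same BeautifulSoup selectors the spider uses, applied to a small hand-written snippet; the stdlib html.parser is used here so the sketch runs without lxml, and the URLs are made up:

from bs4 import BeautifulSoup

html = """
<ul>
  <li class="infinite-post">
    <a href="https://example.com/post-1" title="Post One"></a>
    <img src="https://example.com/img1.jpg">
    <p>Short excerpt...</p>
  </li>
</ul>
"""

soup = BeautifulSoup(html, 'html.parser')
for post in soup.find_all('li', {'class': 'infinite-post'}):
    print(post.find('a').get('href'))      # https://example.com/post-1
    print(post.find('a').get('title'))     # Post One
    print(post.find('img').get('src'))     # https://example.com/img1.jpg
    print(post.find('p').get_text())       # Short excerpt...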
27.783784
65
0.521401
107
1,028
4.943925
0.542056
0.060491
0.034026
0.045369
0
0
0
0
0
0
0
0.001462
0.33463
1,028
36
66
28.555556
0.770468
0.069066
0
0
0
0
0.150943
0
0
0
0
0
0
1
0.04
false
0.04
0.08
0
0.28
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7c09860b07db2134a799e024cf2d3ffbf7dc17
11,429
py
Python
python/tvm/contrib/nvcc.py
ntanhbk44/tvm
f89a929f09f7a0b0ccd0f4d46dc2b1c562839087
[ "Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0" ]
null
null
null
python/tvm/contrib/nvcc.py
ntanhbk44/tvm
f89a929f09f7a0b0ccd0f4d46dc2b1c562839087
[ "Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0" ]
null
null
null
python/tvm/contrib/nvcc.py
ntanhbk44/tvm
f89a929f09f7a0b0ccd0f4d46dc2b1c562839087
[ "Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name """Utility to invoke nvcc compiler in the system""" from __future__ import absolute_import as _abs import subprocess import os import warnings import tvm._ffi from tvm.runtime import ndarray as nd from . import utils from .._ffi.base import py_str def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None): """Compile cuda code with NVCC from env. Parameters ---------- code : str The cuda code. target : str The target format arch : str The architecture options : str or list of str The additional options path_target : str, optional Output file. Return ------ cubin : bytearray The bytearray of the cubin """ temp = utils.tempdir() if target not in ["cubin", "ptx", "fatbin"]: raise ValueError("target must be in cubin, ptx, fatbin") temp_code = temp.relpath("my_kernel.cu") temp_target = temp.relpath("my_kernel.%s" % target) with open(temp_code, "w") as out_file: out_file.write(code) if arch is None: if nd.gpu(0).exist: # auto detect the compute arch argument arch = "sm_" + "".join(nd.gpu(0).compute_version.split(".")) else: raise ValueError("arch(sm_xy) is not passed, and we cannot detect it from env") file_target = path_target if path_target else temp_target cmd = ["nvcc"] cmd += ["--%s" % target, "-O3"] if isinstance(arch, list): cmd += arch else: cmd += ["-arch", arch] if options: if isinstance(options, str): cmd += [options] elif isinstance(options, list): cmd += options else: raise ValueError("options must be str or list of str") cmd += ["-o", file_target] cmd += [temp_code] # NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler # just in case it is not in the path. On Windows it is not in the path by default. # However, we cannot use TVM_CXX_COMPILER_PATH because the runtime env. # Because it is hard to do runtime compiler detection, we require nvcc is configured # correctly by default. # if cxx_compiler_path != "": # cmd += ["-ccbin", cxx_compiler_path] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() if proc.returncode != 0: msg = code msg += "\nCompilation error:\n" msg += py_str(out) raise RuntimeError(msg) data = bytearray(open(file_target, "rb").read()) if not data: raise RuntimeError("Compilation error: empty result is generated") return data def find_cuda_path(): """Utility function to find cuda path Returns ------- path : str Path to cuda root. 
""" if "CUDA_PATH" in os.environ: return os.environ["CUDA_PATH"] cmd = ["which", "nvcc"] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() out = py_str(out) if proc.returncode == 0: return os.path.realpath(os.path.join(str(out).strip(), "../..")) cuda_path = "/usr/local/cuda" if os.path.exists(os.path.join(cuda_path, "bin/nvcc")): return cuda_path raise RuntimeError("Cannot find cuda path") def get_cuda_version(cuda_path): """Utility function to get cuda version Parameters ---------- cuda_path : str Path to cuda root. Returns ------- version : float The cuda version """ version_file_path = os.path.join(cuda_path, "version.txt") if not os.path.exists(version_file_path): # Debian/Ubuntu repackaged CUDA path version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt") try: with open(version_file_path) as f: version_str = f.readline().replace("\n", "").replace("\r", "") return float(version_str.split(" ")[2][:2]) except FileNotFoundError: pass cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() out = py_str(out) if proc.returncode == 0: release_line = [l for l in out.split("\n") if "release" in l][0] release_fields = [s.strip() for s in release_line.split(",")] release_version = [f[1:] for f in release_fields if f.startswith("V")][0] major_minor = ".".join(release_version.split(".")[:2]) return float(major_minor) raise RuntimeError("Cannot read cuda version file") @tvm._ffi.register_func("tvm_callback_libdevice_path") def find_libdevice_path(arch): """Utility function to find libdevice Parameters ---------- arch : int The compute architecture in int Returns ------- path : str Path to libdevice. """ cuda_path = find_cuda_path() lib_path = os.path.join(cuda_path, "nvvm/libdevice") if not os.path.exists(lib_path): # Debian/Ubuntu repackaged CUDA path lib_path = os.path.join(cuda_path, "lib/nvidia-cuda-toolkit/libdevice") selected_ver = 0 selected_path = None cuda_ver = get_cuda_version(cuda_path) if cuda_ver in (9.0, 9.1, 10.0, 10.1, 10.2, 11.0, 11.1, 11.2): path = os.path.join(lib_path, "libdevice.10.bc") else: for fn in os.listdir(lib_path): if not fn.startswith("libdevice"): continue ver = int(fn.split(".")[-3].split("_")[-1]) if selected_ver < ver <= arch: selected_ver = ver selected_path = fn if selected_path is None: raise RuntimeError("Cannot find libdevice for arch {}".format(arch)) path = os.path.join(lib_path, selected_path) return path def callback_libdevice_path(arch): try: return find_libdevice_path(arch) except RuntimeError: warnings.warn("Cannot find libdevice path") return "" def get_target_compute_version(target=None): """Utility function to get compute capability of compilation target. Looks for the arch in three different places, first in the target attributes, then the global scope, and finally the GPU device (if it exists). Parameters ---------- target : tvm.target.Target, optional The compilation target Returns ------- compute_version : str compute capability of a GPU (e.g. "8.0") """ # 1. Target if target: if "arch" in target.attrs: compute_version = target.attrs["arch"] major, minor = compute_version.split("_")[1] return major + "." + minor # 2. Global scope from tvm.autotvm.env import AutotvmGlobalScope # pylint: disable=import-outside-toplevel if AutotvmGlobalScope.current.cuda_target_arch: major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1] return major + "." 
+ minor # 3. GPU if tvm.gpu(0).exist: return tvm.gpu(0).compute_version warnings.warn( "No CUDA architecture was specified or GPU detected." "Try specifying it by adding '-arch=sm_xx' to your target." ) return None def parse_compute_version(compute_version): """Parse compute capability string to divide major and minor version Parameters ---------- compute_version : str compute capability of a GPU (e.g. "6.0") Returns ------- major : int major version number minor : int minor version number """ split_ver = compute_version.split(".") try: major = int(split_ver[0]) minor = int(split_ver[1]) return major, minor except (IndexError, ValueError) as err: # pylint: disable=raise-missing-from raise RuntimeError("Compute version parsing error: " + str(err)) def have_fp16(compute_version): """Either fp16 support is provided in the compute capability or not Parameters ---------- compute_version: str compute capability of a GPU (e.g. "6.0") """ major, minor = parse_compute_version(compute_version) # fp 16 support in reference to: # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions if major == 5 and minor == 3: return True if major >= 6: return True return False def have_int8(compute_version): """Either int8 support is provided in the compute capability or not Parameters ---------- compute_version : str compute capability of a GPU (e.g. "6.1") """ major, _ = parse_compute_version(compute_version) if major >= 6: return True return False def have_tensorcore(compute_version=None, target=None): """Either TensorCore support is provided in the compute capability or not Parameters ---------- compute_version : str, optional compute capability of a GPU (e.g. "7.0"). target : tvm.target.Target, optional The compilation target, will be used to determine arch if compute_version isn't specified. """ if compute_version is None: if tvm.gpu(0).exist: compute_version = tvm.gpu(0).compute_version else: if target is None or "arch" not in target.attrs: warnings.warn( "Tensorcore will be disabled due to no CUDA architecture specified." "Try specifying it by adding '-arch=sm_xx' to your target." ) return False compute_version = target.attrs["arch"] # Compute version will be in the form "sm_{major}{minor}" major, minor = compute_version.split("_")[1] compute_version = major + "." + minor major, _ = parse_compute_version(compute_version) if major >= 7: return True return False def have_cudagraph(): """Either CUDA Graph support is provided""" try: cuda_path = find_cuda_path() cuda_ver = get_cuda_version(cuda_path) if cuda_ver < 10.0: return False return True except RuntimeError: return False def have_bf16(compute_version): """Either bf16 support is provided in the compute capability or not Parameters ---------- compute_version : str compute capability of a GPU (e.g. "8.0") """ major, _ = parse_compute_version(compute_version) if major >= 8: return True return False
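A minimal usage sketch for the compile_cuda and capability helpers defined above, assuming TVM exposes this module as tvm.contrib.nvcc and an NVCC toolchain is on PATH; the kernel source and the arch value are illustrative, not part of the original file.

# Illustrative only: compile a trivial kernel to PTX with the helper above.
from tvm.contrib import nvcc

kernel_src = r"""
extern "C" __global__ void add_one(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] + 1.0f;
}
"""

# Passing arch explicitly avoids relying on GPU auto-detection.
ptx = nvcc.compile_cuda(kernel_src, target="ptx", arch="sm_70")
print(len(ptx), "bytes of PTX")

# Capability checks from the same module.
print(nvcc.have_fp16("7.0"), nvcc.have_tensorcore("7.0"))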
30.155673
97
0.627351
1,498
11,429
4.668224
0.211615
0.07007
0.01287
0.012012
0.315744
0.244959
0.207207
0.195624
0.153868
0.134134
0
0.010455
0.26354
11,429
378
98
30.23545
0.820364
0.329863
0
0.305556
0
0
0.121031
0.008318
0.005556
0
0
0
0
1
0.066667
false
0.011111
0.05
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7d299369e55fc318f13ff176616da2592dab8c
526
py
Python
Python/17 - 081 - extraindo dados de uma lista.py
matheusguerreiro/python
f39a1b92409f11cbe7fef5d9261f863f9e0fac0d
[ "MIT" ]
null
null
null
Python/17 - 081 - extraindo dados de uma lista.py
matheusguerreiro/python
f39a1b92409f11cbe7fef5d9261f863f9e0fac0d
[ "MIT" ]
null
null
null
Python/17 - 081 - extraindo dados de uma lista.py
matheusguerreiro/python
f39a1b92409f11cbe7fef5d9261f863f9e0fac0d
[ "MIT" ]
null
null
null
# Aula 17 (Listas (Parte 1))
valores = []
while True:
    valor = int(input('Digite um Valor ou -1 para Finalizar: '))
    if valor < 0:
        print('\nFinalizando...')
        break
    else:
        valores.append(valor)
print(f'Foram digitados {len(valores)} números')
valores.sort(reverse=True)
print(f'Lista ordenada de forma decrescente: {valores}')
if 5 in valores:
    valores.reverse()
    print(f'O valor 5 foi digitado e está na {valores.index(5)} posição.')
else:
    print('Valor 5 não encontrado na lista.')
26.3
74
0.652091
75
526
4.573333
0.626667
0.052478
0
0
0
0
0
0
0
0
0
0.021739
0.212928
526
19
75
27.684211
0.806763
0.04943
0
0.125
0
0
0.461847
0
0
0
0
0
0
1
0
false
0
0
0
0
0.3125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7e7d0b939c716cda0bb6e7629a5a7ce8b56ac7
10,911
py
Python
python/pyarrow/tests/test_compute.py
kylebrandt/arrow
515197dfe6e83d6fa6fe82bfec134f41b222b748
[ "Apache-2.0" ]
null
null
null
python/pyarrow/tests/test_compute.py
kylebrandt/arrow
515197dfe6e83d6fa6fe82bfec134f41b222b748
[ "Apache-2.0" ]
null
null
null
python/pyarrow/tests/test_compute.py
kylebrandt/arrow
515197dfe6e83d6fa6fe82bfec134f41b222b748
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import pytest import pyarrow as pa import pyarrow.compute all_array_types = [ ('bool', [True, False, False, True, True]), ('uint8', np.arange(5)), ('int8', np.arange(5)), ('uint16', np.arange(5)), ('int16', np.arange(5)), ('uint32', np.arange(5)), ('int32', np.arange(5)), ('uint64', np.arange(5, 10)), ('int64', np.arange(5, 10)), ('float', np.arange(0, 0.5, 0.1)), ('double', np.arange(0, 0.5, 0.1)), ('string', ['a', 'b', None, 'ddd', 'ee']), ('binary', [b'a', b'b', b'c', b'ddd', b'ee']), (pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']), (pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]), (pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]), (pa.struct([('a', pa.int8()), ('b', pa.int8())]), [ {'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]), ] numerical_arrow_types = [ pa.int8(), pa.int16(), pa.int64(), pa.uint8(), pa.uint16(), pa.uint64(), pa.float32(), pa.float64() ] @pytest.mark.parametrize('arrow_type', numerical_arrow_types) def test_sum_array(arrow_type): arr = pa.array([1, 2, 3, 4], type=arrow_type) assert arr.sum() == 10 assert pa.compute.sum(arr) == 10 arr = pa.array([], type=arrow_type) assert arr.sum() == None # noqa: E711 assert pa.compute.sum(arr) == None # noqa: E711 @pytest.mark.parametrize('arrow_type', numerical_arrow_types) def test_sum_chunked_array(arrow_type): arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)]) assert pa.compute.sum(arr) == 10 arr = pa.chunked_array([ pa.array([1, 2], type=arrow_type), pa.array([3, 4], type=arrow_type) ]) assert pa.compute.sum(arr) == 10 arr = pa.chunked_array([ pa.array([1, 2], type=arrow_type), pa.array([], type=arrow_type), pa.array([3, 4], type=arrow_type) ]) assert pa.compute.sum(arr) == 10 arr = pa.chunked_array((), type=arrow_type) print(arr, type(arr)) assert arr.num_chunks == 0 assert pa.compute.sum(arr) == None # noqa: E711 @pytest.mark.parametrize(('ty', 'values'), all_array_types) def test_take(ty, values): arr = pa.array(values, type=ty) for indices_type in [pa.int8(), pa.int64()]: indices = pa.array([0, 4, 2, None], type=indices_type) result = arr.take(indices) result.validate() expected = pa.array([values[0], values[4], values[2], None], type=ty) assert result.equals(expected) # empty indices indices = pa.array([], type=indices_type) result = arr.take(indices) result.validate() expected = pa.array([], type=ty) assert result.equals(expected) indices = pa.array([2, 5]) with pytest.raises(IndexError): arr.take(indices) indices = pa.array([2, -1]) with pytest.raises(IndexError): arr.take(indices) def test_take_indices_types(): arr = pa.array(range(5)) for indices_type in ['uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64']: indices = pa.array([0, 4, 2, 
None], type=indices_type) result = arr.take(indices) result.validate() expected = pa.array([0, 4, 2, None]) assert result.equals(expected) for indices_type in [pa.float32(), pa.float64()]: indices = pa.array([0, 4, 2], type=indices_type) with pytest.raises(NotImplementedError): arr.take(indices) @pytest.mark.parametrize('ordered', [False, True]) def test_take_dictionary(ordered): arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], ordered=ordered) result = arr.take(pa.array([0, 1, 3])) result.validate() assert result.to_pylist() == ['a', 'b', 'a'] assert result.dictionary.to_pylist() == ['a', 'b', 'c'] assert result.type.ordered is ordered @pytest.mark.parametrize(('ty', 'values'), all_array_types) def test_filter(ty, values): arr = pa.array(values, type=ty) mask = pa.array([True, False, False, True, None]) result = arr.filter(mask, null_selection_behavior='drop') result.validate() assert result.equals(pa.array([values[0], values[3]], type=ty)) result = arr.filter(mask, null_selection_behavior='emit_null') result.validate() assert result.equals(pa.array([values[0], values[3], None], type=ty)) # non-boolean dtype mask = pa.array([0, 1, 0, 1, 0]) with pytest.raises(NotImplementedError): arr.filter(mask) # wrong length mask = pa.array([True, False, True]) with pytest.raises(ValueError, match="must all be the same length"): arr.filter(mask) def test_filter_chunked_array(): arr = pa.chunked_array([["a", None], ["c", "d", "e"]]) expected_drop = pa.chunked_array([["a"], ["e"]]) expected_null = pa.chunked_array([["a"], [None, "e"]]) for mask in [ # mask is array pa.array([True, False, None, False, True]), # mask is chunked array pa.chunked_array([[True, False, None], [False, True]]), # mask is python object [True, False, None, False, True] ]: result = arr.filter(mask) assert result.equals(expected_drop) result = arr.filter(mask, null_selection_behavior="emit_null") assert result.equals(expected_null) def test_filter_record_batch(): batch = pa.record_batch( [pa.array(["a", None, "c", "d", "e"])], names=["a'"]) # mask is array mask = pa.array([True, False, None, False, True]) result = batch.filter(mask) expected = pa.record_batch([pa.array(["a", "e"])], names=["a'"]) assert result.equals(expected) result = batch.filter(mask, null_selection_behavior="emit_null") expected = pa.record_batch([pa.array(["a", None, "e"])], names=["a'"]) assert result.equals(expected) def test_filter_table(): table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"]) expected_drop = pa.table([pa.array(["a", "e"])], names=["a"]) expected_null = pa.table([pa.array(["a", None, "e"])], names=["a"]) for mask in [ # mask is array pa.array([True, False, None, False, True]), # mask is chunked array pa.chunked_array([[True, False], [None, False, True]]), # mask is python object [True, False, None, False, True] ]: result = table.filter(mask) assert result.equals(expected_drop) result = table.filter(mask, null_selection_behavior="emit_null") assert result.equals(expected_null) def test_filter_errors(): arr = pa.chunked_array([["a", None], ["c", "d", "e"]]) batch = pa.record_batch( [pa.array(["a", None, "c", "d", "e"])], names=["a'"]) table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"]) for obj in [arr, batch, table]: # non-boolean dtype mask = pa.array([0, 1, 0, 1, 0]) with pytest.raises(NotImplementedError): obj.filter(mask) # wrong length mask = pa.array([True, False, True]) with pytest.raises(pa.ArrowInvalid, match="must all be the same length"): obj.filter(mask) 
@pytest.mark.parametrize("typ", ["array", "chunked_array"]) def test_compare_array(typ): if typ == "array": def con(values): return pa.array(values) else: def con(values): return pa.chunked_array([values]) arr1 = con([1, 2, 3, 4, None]) arr2 = con([1, 1, 4, None, 4]) result = arr1 == arr2 assert result.equals(con([True, False, False, None, None])) result = arr1 != arr2 assert result.equals(con([False, True, True, None, None])) result = arr1 < arr2 assert result.equals(con([False, False, True, None, None])) result = arr1 <= arr2 assert result.equals(con([True, False, True, None, None])) result = arr1 > arr2 assert result.equals(con([False, True, False, None, None])) result = arr1 >= arr2 assert result.equals(con([True, True, False, None, None])) @pytest.mark.parametrize("typ", ["array", "chunked_array"]) def test_compare_scalar(typ): if typ == "array": def con(values): return pa.array(values) else: def con(values): return pa.chunked_array([values]) arr = con([1, 2, 3, None]) # TODO this is a hacky way to construct a scalar .. scalar = pa.array([2]).sum() result = arr == scalar assert result.equals(con([False, True, False, None])) result = arr != scalar assert result.equals(con([True, False, True, None])) result = arr < scalar assert result.equals(con([True, False, False, None])) result = arr <= scalar assert result.equals(con([True, True, False, None])) result = arr > scalar assert result.equals(con([False, False, True, None])) result = arr >= scalar assert result.equals(con([False, True, True, None])) def test_compare_chunked_array_mixed(): arr = pa.array([1, 2, 3, 4, None]) arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]]) arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]]) expected = pa.chunked_array([[True, True, True, True, None]]) for result in [ arr == arr_chunked, arr_chunked == arr, arr_chunked == arr_chunked2, ]: assert result.equals(expected) def test_arithmetic_add(): left = pa.array([1, 2, 3, 4, 5]) right = pa.array([0, -1, 1, 2, 3]) result = pa.compute.add(left, right) expected = pa.array([1, 1, 4, 6, 8]) assert result.equals(expected) def test_arithmetic_subtract(): left = pa.array([1, 2, 3, 4, 5]) right = pa.array([0, -1, 1, 2, 3]) result = pa.compute.subtract(left, right) expected = pa.array([1, 3, 2, 2, 2]) assert result.equals(expected) def test_arithmetic_multiply(): left = pa.array([1, 2, 3, 4, 5]) right = pa.array([0, -1, 1, 2, 3]) result = pa.compute.multiply(left, right) expected = pa.array([0, -2, 3, 8, 15]) assert result.equals(expected)
32.281065
77
0.59967
1,548
10,911
4.147287
0.133075
0.056698
0.075701
0.052648
0.657477
0.626324
0.591745
0.502804
0.462928
0.430841
0
0.031858
0.223261
10,911
337
78
32.376855
0.725664
0.095408
0
0.377119
0
0
0.040155
0
0
0
0
0.002967
0.165254
1
0.084746
false
0
0.016949
0.016949
0.101695
0.004237
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7ef047892a808f5b9e319a809d26915f83c93f
2,207
py
Python
openmdao/solvers/nonlinear/nonlinear_block_jac.py
bollwyvl/OpenMDAO
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
[ "Apache-2.0" ]
null
null
null
openmdao/solvers/nonlinear/nonlinear_block_jac.py
bollwyvl/OpenMDAO
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
[ "Apache-2.0" ]
null
null
null
openmdao/solvers/nonlinear/nonlinear_block_jac.py
bollwyvl/OpenMDAO
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
[ "Apache-2.0" ]
1
2018-07-27T06:39:15.000Z
2018-07-27T06:39:15.000Z
"""Define the NonlinearBlockJac class.""" from openmdao.recorders.recording_iteration_stack import Recording from openmdao.solvers.solver import NonlinearSolver from openmdao.utils.mpi import multi_proc_fail_check class NonlinearBlockJac(NonlinearSolver): """ Nonlinear block Jacobi solver. """ SOLVER = 'NL: NLBJ' def _single_iteration(self): """ Perform the operations in the iteration loop. """ system = self._system self._solver_info.append_subsolver() system._transfer('nonlinear', 'fwd') with Recording('NonlinearBlockJac', 0, self) as rec: # If this is a parallel group, check for analysis errors and reraise. if len(system._subsystems_myproc) != len(system._subsystems_allprocs): with multi_proc_fail_check(system.comm): for subsys in system._subsystems_myproc: subsys._solve_nonlinear() else: for subsys in system._subsystems_myproc: subsys._solve_nonlinear() system._check_child_reconf() rec.abs = 0.0 rec.rel = 0.0 self._solver_info.pop() def _mpi_print_header(self): """ Print header text before solving. """ if (self.options['iprint'] > 0): pathname = self._system.pathname if pathname: nchar = len(pathname) prefix = self._solver_info.prefix header = prefix + "\n" header += prefix + nchar * "=" + "\n" header += prefix + pathname + "\n" header += prefix + nchar * "=" print(header) def _run_apply(self): """ Run the apply_nonlinear method on the system. """ system = self._system # If this is a parallel group, check for analysis errors and reraise. if len(system._subsystems_myproc) != len(system._subsystems_allprocs): with multi_proc_fail_check(system.comm): super(NonlinearBlockJac, self)._run_apply() else: super(NonlinearBlockJac, self)._run_apply()
32.940299
82
0.584957
229
2,207
5.406114
0.358079
0.077544
0.061389
0.043619
0.366721
0.311793
0.311793
0.311793
0.311793
0.226171
0
0.004024
0.324422
2,207
66
83
33.439394
0.826291
0.149071
0
0.358974
0
0
0.028539
0
0
0
0
0
0
1
0.076923
false
0
0.076923
0
0.205128
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7ef877f9a75af565239a4f498da3558863fc35
7,766
py
Python
tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
Smokrow/tensorflow
debd66dae1c9a49d36ea006c97facf06b4ac25cb
[ "Apache-2.0" ]
1
2018-09-08T08:26:31.000Z
2018-09-08T08:26:31.000Z
tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
Smokrow/tensorflow
debd66dae1c9a49d36ea006c97facf06b4ac25cb
[ "Apache-2.0" ]
null
null
null
tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
Smokrow/tensorflow
debd66dae1c9a49d36ea006c97facf06b4ac25cb
[ "Apache-2.0" ]
1
2020-02-15T14:34:36.000Z
2020-02-15T14:34:36.000Z
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the MapAndFilterFusion optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.contrib.data.python.ops import optimization from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class MapAndFilterFusionTest(test.TestCase, parameterized.TestCase): @staticmethod def map_functions(): identity = lambda x: x increment = lambda x: x + 1 def increment_and_square(x): y = x + 1 return y * y functions = [identity, increment, increment_and_square] tests = [] for i, fun1 in enumerate(functions): for j, fun2 in enumerate(functions): tests.append(( "Test{}{}".format(i, j), [fun1, fun2], )) for k, fun3 in enumerate(functions): tests.append(( "Test{}{}{}".format(i, j, k), [fun1, fun2, fun3], )) swap = lambda x, n: (n, x) tests.append(( "Swap1", [lambda x: (x, 42), swap], )) tests.append(( "Swap2", [lambda x: (x, 42), swap, swap], )) return tuple(tests) @parameterized.named_parameters(*map_functions.__func__()) def testMapFusion(self, functions): dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Map", "Prefetch"])) for function in functions: dataset = dataset.map(function) dataset = dataset.prefetch(0).apply(optimization.optimize(["map_fusion"])) iterator = dataset.make_one_shot_iterator() get_next = iterator.get_next() with self.test_session() as sess: for x in range(5): result = sess.run(get_next) r = x for function in functions: if isinstance(r, tuple): r = function(*r) # Pass tuple as multiple arguments. 
else: r = function(r) self.assertAllEqual(r, result) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @staticmethod def map_and_filter_functions(): identity = lambda x: x increment = lambda x: x + 1 minus_five = lambda x: x - 5 def increment_and_square(x): y = x + 1 return y * y take_all = lambda x: constant_op.constant(True) is_zero = lambda x: math_ops.equal(x, 0) is_odd = lambda x: math_ops.equal(x % 2, 0) greater = lambda x: math_ops.greater(x + 5, 0) functions = [identity, increment, minus_five, increment_and_square] filters = [take_all, is_zero, is_odd, greater] tests = [] for x, fun in enumerate(functions): for y, predicate in enumerate(filters): tests.append(("Mixed{}{}".format(x, y), fun, predicate)) # Multi output tests.append(("Multi1", lambda x: (x, x), lambda x, y: constant_op.constant(True))) tests.append( ("Multi2", lambda x: (x, 2), lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0))) return tuple(tests) @parameterized.named_parameters(*map_and_filter_functions.__func__()) def testMapFilterFusion(self, function, predicate): dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next( ["Map", "FilterByLastComponent"])).map(function).filter(predicate).apply( optimization.optimize(["map_and_filter_fusion"])) self._testMapAndFilter(dataset, function, predicate) def _testMapAndFilter(self, dataset, function, predicate): iterator = dataset.make_one_shot_iterator() get_next = iterator.get_next() with self.test_session() as sess: for x in range(10): r = function(x) if isinstance(r, tuple): b = predicate(*r) # Pass tuple as multiple arguments. else: b = predicate(r) if sess.run(b): result = sess.run(get_next) self.assertAllEqual(r, result) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) def testAdditionalInputs(self): a = constant_op.constant(3, dtype=dtypes.int64) b = constant_op.constant(4, dtype=dtypes.int64) some_tensor = math_ops.mul(a, b) function = lambda x: x * x def predicate(y): return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor) # We are currently not supporting functions with additional inputs. 
dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next( ["Map", "Filter"])).map(function).filter(predicate).apply( optimization.optimize(["map_and_filter_fusion"])) self._testMapAndFilter(dataset, function, predicate) @staticmethod def filter_functions(): take_all = lambda x: constant_op.constant(True) is_zero = lambda x: math_ops.equal(x, 0) greater = lambda x: math_ops.greater(x + 5, 0) tests = [] filters = [take_all, is_zero, greater] identity = lambda x: x for x, predicate_1 in enumerate(filters): for y, predicate_2 in enumerate(filters): tests.append(("Mixed{}{}".format(x, y), identity, [predicate_1, predicate_2])) for z, predicate_3 in enumerate(filters): tests.append(("Mixed{}{}{}".format(x, y, z), identity, [predicate_1, predicate_2, predicate_3])) take_all_multiple = lambda x, y: constant_op.constant(True) # Multi output tests.append(("Multi1", lambda x: (x, x), [take_all_multiple, take_all_multiple])) tests.append(("Multi2", lambda x: (x, 2), [ take_all_multiple, lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0) ])) return tuple(tests) @parameterized.named_parameters(*filter_functions.__func__()) def testFilterFusion(self, map_function, predicates): dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Map", "Filter", "Prefetch"])).map(map_function) for predicate in predicates: dataset = dataset.filter(predicate) dataset = dataset.prefetch(0).apply( optimization.optimize(["filter_fusion"])) iterator = dataset.make_one_shot_iterator() get_next = iterator.get_next() with self.test_session() as sess: for x in range(5): r = map_function(x) filtered = False for predicate in predicates: if isinstance(r, tuple): b = predicate(*r) # Pass tuple as multiple arguments. else: b = predicate(r) if not sess.run(b): filtered = True break if not filtered: result = sess.run(get_next) self.assertAllEqual(r, result) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) if __name__ == "__main__": test.main()
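The tests above drive the deprecated tf.contrib.data optimization hooks; below is a rough TF 2.x analogue of the fused map + filter pipeline, assuming TensorFlow 2.x is installed. The option attribute name is taken from the optimization under test and may differ between releases.

# Rough TF 2.x analogue of the map + filter pipelines exercised above.
import tensorflow as tf

increment = lambda x: x + 1
is_even = lambda x: tf.equal(x % 2, 0)

options = tf.data.Options()
# Assumed option name; availability varies across TensorFlow versions.
options.experimental_optimization.map_and_filter_fusion = True

dataset = (tf.data.Dataset.range(10)
           .map(increment)
           .filter(is_even)
           .with_options(options))
print(list(dataset.as_numpy_iterator()))  # [2, 4, 6, 8, 10]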
34.515556
80
0.638424
970
7,766
4.954639
0.206186
0.036413
0.02164
0.017478
0.54432
0.492509
0.474823
0.414274
0.414274
0.354765
0
0.01271
0.240149
7,766
224
81
34.669643
0.801729
0.116147
0
0.474286
0
0
0.031437
0.009212
0
0
0
0
0.057143
1
0.062857
false
0
0.062857
0.005714
0.165714
0.005714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7f52df743becc0516c5282308cd0c5db04737d
16,979
py
Python
meerk40t/lihuiyu/lihuiyuemulator.py
jpirnay/meerk40t
10d4e41a8c5e2bb95a504904273699e115822b9b
[ "MIT" ]
null
null
null
meerk40t/lihuiyu/lihuiyuemulator.py
jpirnay/meerk40t
10d4e41a8c5e2bb95a504904273699e115822b9b
[ "MIT" ]
null
null
null
meerk40t/lihuiyu/lihuiyuemulator.py
jpirnay/meerk40t
10d4e41a8c5e2bb95a504904273699e115822b9b
[ "MIT" ]
null
null
null
from meerk40t.core.cutcode import CutCode, RawCut from meerk40t.core.parameters import Parameters from meerk40t.core.units import UNITS_PER_MIL from meerk40t.kernel import Module from meerk40t.numpath import Numpath from meerk40t.svgelements import Color class LihuiyuEmulator(Module): def __init__(self, context, path): Module.__init__(self, context, path) self.context.setting(bool, "fix_speeds", False) self.parser = LihuiyuParser() self.parser.fix_speeds = self.context.fix_speeds self.parser.channel = self.context.channel("lhy") def pos(p): if p is None: return x0, y0, x1, y1 = p self.context.signal("emulator;position", (x0, y0, x1, y1)) self.parser.position = pos def __repr__(self): return "LihuiyuEmulator(%s)" % self.name def initialize(self, *args, **kwargs): context = self.context active = self.context.root.active send = context.channel("%s/usb_send" % active) send.watch(self.parser.write_packet) def finalize(self, *args, **kwargs): context = self.context active = self.context.root.active send = context.channel("%s/usb_send" % active) send.unwatch(self.parser.write_packet) class LihuiyuParser: """ LihuiyuParser parses LHYMicro-GL code with a state diagram. This should accurately reconstruct the values. When the position is changed it calls a self.position() function if one exists. """ def __init__(self): self.channel = None self.position = None self.board = "M2" self.header_skipped = False self.count_lines = 0 self.count_flag = 0 self.settings = Parameters({"speed": 20.0, "power": 1000.0}) self.speed_code = None self.x = 0.0 self.y = 0.0 self.number_value = "" self.distance_x = 0 self.distance_y = 0 self.filename = "" self.laser = 0 self.left = False self.top = False self.x_on = False self.y_on = False self.small_jump = False self.returning_compact = True self.returning_finished = False self.mode = None self.raster_step = 0 self.paused_state = False self.compact_state = False self.finish_state = False self.horizontal_major = False self.fix_speeds = False self.number_consumer = {} def parse(self, data, elements): self.path = Numpath() def position(p): if p is None: return from_x, from_y, to_x, to_y = p if self.program_mode: if self.laser: self.path.line(complex(from_x, from_y), complex(to_x, to_y)) self.position = position self.write(data) self.path.uscale(UNITS_PER_MIL) elements.elem_branch.add( type="elem numpath", path=self.path, stroke=Color("black"), **self.settings.settings, ) elements.signal("refresh_scene", 0) @property def program_mode(self): return self.compact_state @property def default_mode(self): return not self.compact_state @property def raster_mode(self): return self.settings.get("raster_step", 0) != 0 def new_file(self): self.header_skipped = False self.count_flag = 0 self.count_lines = 0 @staticmethod def remove_header(data): count_lines = 0 count_flag = 0 for i in range(len(data)): b = data[i] c = chr(b) if c == "\n": count_lines += 1 elif c == "%": count_flag += 1 if count_lines >= 3 and count_flag >= 5: return data[i:] def header_write(self, data): """ Write data to the emulator including the header. This is intended for saved .egv files which include a default header. 
""" if self.header_skipped: self.write(data) else: data = LihuiyuParser.remove_header(data) self.write(data) def write_packet(self, packet): self.write(packet[1:31]) def write(self, data): for b in data: self.process(b, chr(b)) def distance_consumer(self, c): self.number_value += c if len(self.number_value) >= 3: self.append_distance(int(self.number_value)) self.number_value = "" def speedcode_b1_consumer(self, c): self.number_value += c if len(self.number_value) >= 3: if self.channel: self.channel("Speedcode B1 = %s" % self.number_value) self.number_value = "" self.number_consumer = self.speedcode_b2_consumer def speedcode_b2_consumer(self, c): self.number_value += c if len(self.number_value) >= 3: if self.channel: self.channel("Speedcode B2 = %s" % self.number_value) self.number_value = "" self.number_consumer = self.speedcode_accel_consumer def speedcode_accel_consumer(self, c): self.number_value += c if len(self.number_value) >= 1: if self.channel: self.channel("Speedcode Accel = %s" % self.number_value) self.number_value = "" self.number_consumer = self.speedcode_mult_consumer def speedcode_mult_consumer(self, c): self.number_value += c if len(self.number_value) >= 3: if self.channel: self.channel("Speedcode Accel = %s" % self.number_value) self.number_value = "" self.number_consumer = self.speedcode_dratio_b1_consumer def speedcode_dratio_b1_consumer(self, c): self.number_value += c if len(self.number_value) >= 3: if self.channel: self.channel("Speedcode Dratio b1 = %s" % self.number_value) self.number_value = "" self.number_consumer = self.speedcode_dratio_b2_consumer def speedcode_dratio_b2_consumer(self, c): self.number_value += c if len(self.number_value) >= 3: if self.channel: self.channel("Speedcode Dratio b2 = %s" % self.number_value) self.number_value = "" self.number_consumer = self.distance_consumer def raster_step_consumer(self, c): self.number_value += c if len(self.number_value) >= 3: if self.channel: self.channel("Raster Step = %s" % self.number_value) self.raster_step = int(self.number_value) self.number_value = "" self.number_consumer = self.distance_consumer def mode_consumer(self, c): self.number_value += c if len(self.number_value) >= 1: if self.channel: self.channel("Set Mode = %s" % self.number_value) self.mode = int(self.number_value) self.number_value = "" self.number_consumer = self.speedcode_mult_consumer def append_distance(self, amount): if self.x_on: self.distance_x += amount if self.y_on: self.distance_y += amount def execute_distance(self): if self.distance_x != 0 or self.distance_y != 0: dx = self.distance_x dy = self.distance_y if self.left: dx = -dx if self.top: dy = -dy self.distance_x = 0 self.distance_y = 0 ox = self.x oy = self.y self.x += dx self.y += dy if self.position: self.position((ox, oy, self.x, self.y)) if self.channel: self.channel("Moving (%d %d) now at %d %d" % (dx, dy, self.x, self.y)) def process(self, b, c): if c == "I": self.finish_state = False self.compact_state = False self.paused_state = False self.distance_x = 0 self.distance_y = 0 if self.finish_state: # In finished all commands are black holed return if ord("0") <= b <= ord("9"): self.number_consumer(c) return else: self.number_consumer = self.distance_consumer self.number_value = "" if self.compact_state: # Every command in compact state executes distances. 
self.execute_distance() if c == "|": self.append_distance(25) self.small_jump = True elif ord("a") <= b <= ord("y"): self.append_distance(b + 1 - ord("a")) self.small_jump = False elif c == "z": self.append_distance(26 if self.small_jump else 255) self.small_jump = False elif c == "B": # Move to Right. if self.left and self.horizontal_major: # Was T switched to B with horizontal rastering. if self.raster_step: self.distance_y += self.raster_step self.left = False self.x_on = True self.y_on = False if self.channel: self.channel("Right") elif c == "T": # Move to Left if not self.left and self.horizontal_major: # Was T switched to B with horizontal rastering. if self.raster_step: self.distance_y += self.raster_step self.left = True self.x_on = True self.y_on = False if self.channel: self.channel("Left") elif c == "R": # Move to Bottom if self.top and not self.horizontal_major: # Was L switched to R with vertical rastering. if self.raster_step: self.distance_x += self.raster_step self.top = False self.x_on = False self.y_on = True if self.channel: self.channel("Bottom") elif c == "L": # Move to Top if not self.top and not self.horizontal_major: # Was R switched to L with vertical rastering. if self.raster_step: self.distance_x += self.raster_step self.top = True self.x_on = False self.y_on = True if self.channel: self.channel("Top") elif c == "U": self.laser = 0 if self.channel: self.channel("Laser Off") elif c == "D": self.laser = 1 if self.channel: self.channel("Laser On") elif c == "F": if self.channel: self.channel("Finish") self.returning_compact = False self.returning_finished = True elif c == "@": if self.channel: self.channel("Reset") self.returning_finished = False self.returning_compact = False elif c in "C": if self.channel: self.channel("Speedcode") self.speed_code = "" elif c in "V": self.raster_step = None if self.channel: self.channel("Velocity") self.number_consumer = self.speedcode_b1_consumer elif c in "G": if self.channel: self.channel("Step Value") self.number_consumer = self.raster_step_consumer elif c == "S": if self.channel: self.channel("Mode Set") self.laser = 0 self.execute_distance() self.mode = None self.number_consumer = self.mode_consumer elif c == "E": if self.channel: self.channel("Execute State") if self.mode is None: if self.returning_compact: self.compact_state = True if self.returning_finished: self.finish_state = True if self.horizontal_major: self.left = not self.left self.x_on = True self.y_on = False if self.raster_step: self.distance_y += self.raster_step else: # vertical major self.top = not self.top self.x_on = False self.y_on = True if self.raster_step: self.distance_x += self.raster_step elif self.mode == 0: # Homes then moves position. pass elif self.mode == 1: self.compact_state = True self.horizontal_major = self.x_on if self.channel: self.channel("Setting Axis: h=" + str(self.x_on)) elif self.mode == 2: # Rail unlocked. self.compact_state = True self.returning_finished = False self.returning_compact = True self.laser = 0 elif c == "P": if self.channel: self.channel("Pause") self.laser = 0 if self.paused_state: # Home sequence triggered by 2 P commands in the same packet. # This should resume if not located within the same packet. 
if self.position: self.position((self.x, self.y, 0, 0)) self.x = 0 self.y = 0 self.distance_y = 0 self.distance_x = 0 self.finish_state = True self.paused_state = False else: self.execute_distance() # distance is executed by a P command self.paused_state = True elif c == "N": if self.channel: self.channel("N") self.execute_distance() # distance is executed by an N command. self.laser = 0 self.compact_state = False if self.position: self.position(None) elif c == "M": self.x_on = True self.y_on = True if self.channel: a = "Top" if self.top else "Bottom" b = "Left" if self.left else "Right" self.channel("Diagonal %s %s" % (a, b)) class EGVBlob: def __init__(self, data: bytearray, name=None): self.name = name self.data = data self.operation = "blob" self._cutcode = None self._cut = None def __repr__(self): return "EGV(%s, %d bytes)" % (self.name, len(self.data)) def as_cutobjects(self): parser = LihuiyuParser() self._cutcode = CutCode() self._cut = RawCut() def new_cut(): if self._cut is not None and len(self._cut): self._cutcode.append(self._cut) self._cut = RawCut() self._cut.settings = dict(parser.settings) def position(p): if p is None or self._cut is None: new_cut() return from_x, from_y, to_x, to_y = p if parser.program_mode: if len(self._cut.plot) == 0: self._cut.plot_append(int(from_x), int(from_y), parser.laser) self._cut.plot_append(int(to_x), int(to_y), parser.laser) else: new_cut() parser.position = position parser.header_write(self.data) cutcode = self._cutcode self._cut = None self._cutcode = None return cutcode def generate(self): yield "blob", "egv", LihuiyuParser.remove_header(self.data) class EgvLoader: @staticmethod def remove_header(data): count_lines = 0 count_flag = 0 for i in range(len(data)): b = data[i] c = chr(b) if c == "\n": count_lines += 1 elif c == "%": count_flag += 1 if count_lines >= 3 and count_flag >= 5: return data[i:] @staticmethod def load_types(): yield "Engrave Files", ("egv",), "application/x-egv" @staticmethod def load(kernel, elements_modifier, pathname, **kwargs): import os basename = os.path.basename(pathname) with open(pathname, "rb") as f: op_branch = elements_modifier.get(type="branch ops") op_branch.add( data=bytearray(EgvLoader.remove_header(f.read())), data_type="egv", type="blob", name=basename, ) return True
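A small hedged sketch of driving the LihuiyuParser state machine above directly; the LHYMicro-GL byte string is made up for illustration, and the import path follows this record's file path.

# Illustrative only: feed a hand-written LHYMicro-GL fragment to the parser above.
from meerk40t.lihuiyu.lihuiyuemulator import LihuiyuParser

parser = LihuiyuParser()
parser.channel = print                        # log each decoded command
parser.position = lambda p: print("pos:", p)  # called whenever the head moves

# 'I' resets state, 'B'/'R' pick axis and direction, digit triples are distances,
# 'S1E' switches the machine into compact (program) mode.
parser.write(b"IB100R100S1E")
print(parser.x, parser.y)  # head position after the executed distances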
32.777992
118
0.52777
1,998
16,979
4.325325
0.133133
0.03888
0.069428
0.049178
0.476394
0.39875
0.355473
0.314973
0.297153
0.297153
0
0.010703
0.378173
16,979
517
119
32.841393
0.807823
0.052182
0
0.487356
0
0
0.034943
0
0
0
0
0
0
1
0.085057
false
0.002299
0.016092
0.011494
0.142529
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a7fd9c2a4520acac2ad0d4b073014e3ffeaa218
20,152
py
Python
oauth/provider.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
2,027
2019-11-12T18:05:48.000Z
2022-03-31T22:25:04.000Z
oauth/provider.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
496
2019-11-12T18:13:37.000Z
2022-03-31T10:43:45.000Z
oauth/provider.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
249
2019-11-12T18:02:27.000Z
2022-03-22T12:19:19.000Z
# Ported to Python 3 # Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py import json import logging from requests import Response from io import StringIO try: from werkzeug.exceptions import Unauthorized except ImportError: Unauthorized = Exception from oauth import utils class Provider(object): """Base provider class for different types of OAuth 2.0 providers.""" def _handle_exception(self, exc): """Handle an internal exception that was caught and suppressed. :param exc: Exception to process. :type exc: Exception """ logger = logging.getLogger(__name__) logger.exception(exc) def _make_response(self, body="", headers=None, status_code=200): """Return a response object from the given parameters. :param body: Buffer/string containing the response body. :type body: str :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """ res = Response() res.status_code = status_code if headers is not None: res.headers.update(headers) res.raw = StringIO(body) return res def _make_redirect_error_response(self, redirect_uri, err): """Return a HTTP 302 redirect response object containing the error. :param redirect_uri: Client redirect URI. :type redirect_uri: str :param err: OAuth error message. :type err: str :rtype: requests.Response """ params = {"error": err, "response_type": None, "client_id": None, "redirect_uri": None} redirect = utils.build_url(redirect_uri, params) return self._make_response(headers={"Location": redirect}, status_code=302) def _make_json_response(self, data, headers=None, status_code=200): """Return a response object from the given JSON data. :param data: Data to JSON-encode. :type data: mixed :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """ response_headers = {} if headers is not None: response_headers.update(headers) response_headers["Content-Type"] = "application/json;charset=UTF-8" response_headers["Cache-Control"] = "no-store" response_headers["Pragma"] = "no-cache" return self._make_response(json.dumps(data), response_headers, status_code) def _make_json_error_response(self, err): """Return a JSON-encoded response object representing the error. :param err: OAuth error message. :type err: str :rtype: requests.Response """ return self._make_json_response({"error": err}, status_code=400) def _invalid_redirect_uri_response(self): """What to return when the redirect_uri parameter is missing. :rtype: requests.Response """ return self._make_json_error_response("invalid_request") class AuthorizationProvider(Provider): """OAuth 2.0 authorization provider. This class manages authorization codes and access tokens. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a provider. 
These are the methods that must be implemented in a subclass: validate_client_id(self, client_id) # Return True or False validate_client_secret(self, client_id, client_secret) # Return True or False validate_scope(self, client_id, scope) # Return True or False validate_redirect_uri(self, client_id, redirect_uri) # Return True or False validate_access(self) # Use this to validate your app session user # Return True or False from_authorization_code(self, client_id, code, scope) # Return mixed data or None on invalid from_refresh_token(self, client_id, refresh_token, scope) # Return mixed data or None on invalid persist_authorization_code(self, client_id, code, scope) # Return value ignored persist_token_information(self, client_id, scope, access_token, token_type, expires_in, refresh_token, data) # Return value ignored discard_authorization_code(self, client_id, code) # Return value ignored discard_refresh_token(self, client_id, refresh_token) # Return value ignored Optionally, the following may be overridden to acheive desired behavior: @property token_length(self) @property token_type(self) @property token_expires_in(self) generate_authorization_code(self) generate_access_token(self) generate_refresh_token(self) """ @property def token_length(self): """Property method to get the length used to generate tokens. :rtype: int """ return 40 @property def token_type(self): """Property method to get the access token type. :rtype: str """ return "Bearer" @property def token_expires_in(self): """Property method to get the token expiration time in seconds. :rtype: int """ return 3600 def generate_authorization_code(self): """Generate a random authorization code. :rtype: str """ return utils.random_ascii_string(self.token_length) def generate_access_token(self): """Generate a random access token. :rtype: str """ return utils.random_ascii_string(self.token_length) def generate_refresh_token(self): """Generate a random refresh token. :rtype: str """ return utils.random_ascii_string(self.token_length) def get_authorization_code(self, response_type, client_id, redirect_uri, **params): """Generate authorization code HTTP response. :param response_type: Desired response type. Must be exactly "code". :type response_type: str :param client_id: Client ID. :type client_id: str :param redirect_uri: Client redirect URI. 
:type redirect_uri: str :rtype: requests.Response """ # Ensure proper response_type if response_type != "code": err = "unsupported_response_type" return self._make_redirect_error_response(redirect_uri, err) # Check redirect URI is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri) if not is_valid_redirect_uri: return self._invalid_redirect_uri_response() # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_access = self.validate_access() scope = params.get("scope", "") is_valid_scope = self.validate_scope(client_id, scope) # Return proper error responses on invalid conditions if not is_valid_client_id: err = "unauthorized_client" return self._make_redirect_error_response(redirect_uri, err) if not is_valid_access: err = "access_denied" return self._make_redirect_error_response(redirect_uri, err) if not is_valid_scope: err = "invalid_scope" return self._make_redirect_error_response(redirect_uri, err) # Generate authorization code code = self.generate_authorization_code() # Save information to be used to validate later requests self.persist_authorization_code(client_id=client_id, code=code, scope=scope) # Return redirection response params.update( {"code": code, "response_type": None, "client_id": None, "redirect_uri": None} ) redirect = utils.build_url(redirect_uri, params) return self._make_response(headers={"Location": redirect}, status_code=302) def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params): """Generate access token HTTP response from a refresh token. :param grant_type: Desired grant type. Must be "refresh_token". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param refresh_token: Refresh token. :type refresh_token: str :rtype: requests.Response """ # Ensure proper grant_type if grant_type != "refresh_token": return self._make_json_error_response("unsupported_grant_type") # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_client_secret = self.validate_client_secret(client_id, client_secret) scope = params.get("scope", "") is_valid_scope = self.validate_scope(client_id, scope) data = self.from_refresh_token(client_id, refresh_token, scope) is_valid_refresh_token = data is not None # Return proper error responses on invalid conditions if not (is_valid_client_id and is_valid_client_secret): return self._make_json_error_response("invalid_client") if not is_valid_scope: return self._make_json_error_response("invalid_scope") if not is_valid_refresh_token: return self._make_json_error_response("invalid_grant") # Discard original refresh token self.discard_refresh_token(client_id, refresh_token) # Generate access tokens once all conditions have been met access_token = self.generate_access_token() token_type = self.token_type expires_in = self.token_expires_in refresh_token = self.generate_refresh_token() # Save information to be used to validate later requests self.persist_token_information( client_id=client_id, scope=scope, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token, data=data, ) # Return json response return self._make_json_response( { "access_token": access_token, "token_type": token_type, "expires_in": expires_in, "refresh_token": refresh_token, } ) def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params): """Generate access token HTTP response. :param grant_type: Desired grant type. 
Must be "authorization_code". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param redirect_uri: Client redirect URI. :type redirect_uri: str :param code: Authorization code. :type code: str :rtype: requests.Response """ # Ensure proper grant_type if grant_type != "authorization_code": return self._make_json_error_response("unsupported_grant_type") # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_client_secret = self.validate_client_secret(client_id, client_secret) is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri) scope = params.get("scope", "") is_valid_scope = self.validate_scope(client_id, scope) data = self.from_authorization_code(client_id, code, scope) is_valid_grant = data is not None # Return proper error responses on invalid conditions if not (is_valid_client_id and is_valid_client_secret): return self._make_json_error_response("invalid_client") if not is_valid_grant or not is_valid_redirect_uri: return self._make_json_error_response("invalid_grant") if not is_valid_scope: return self._make_json_error_response("invalid_scope") # Discard original authorization code self.discard_authorization_code(client_id, code) # Generate access tokens once all conditions have been met access_token = self.generate_access_token() token_type = self.token_type expires_in = self.token_expires_in refresh_token = self.generate_refresh_token() # Save information to be used to validate later requests self.persist_token_information( client_id=client_id, scope=scope, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token, data=data, ) # Return json response return self._make_json_response( { "access_token": access_token, "token_type": token_type, "expires_in": expires_in, "refresh_token": refresh_token, } ) def get_authorization_code_from_uri(self, uri): """Get authorization code response from a URI. This method will ignore the domain and path of the request, instead automatically parsing the query string parameters. :param uri: URI to parse for authorization information. :type uri: str :rtype: requests.Response """ params = utils.url_query_params(uri) try: if "response_type" not in params: raise TypeError("Missing parameter response_type in URL query") if "client_id" not in params: raise TypeError("Missing parameter client_id in URL query") if "redirect_uri" not in params: raise TypeError("Missing parameter redirect_uri in URL query") return self.get_authorization_code(**params) except TypeError as exc: self._handle_exception(exc) # Catch missing parameters in request err = "invalid_request" if "redirect_uri" in params: u = params["redirect_uri"] return self._make_redirect_error_response(u, err) else: return self._invalid_redirect_uri_response() except StandardError as exc: self._handle_exception(exc) # Catch all other server errors err = "server_error" u = params["redirect_uri"] return self._make_redirect_error_response(u, err) def get_token_from_post_data(self, data): """Get a token response from POST data. :param data: POST data containing authorization information. 
:type data: dict :rtype: requests.Response """ try: # Verify OAuth 2.0 Parameters for x in ["grant_type", "client_id", "client_secret"]: if not data.get(x): raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x)) # Handle get token from refresh_token if "refresh_token" in data: return self.refresh_token(**data) # Handle get token from authorization code for x in ["redirect_uri", "code"]: if not data.get(x): raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x)) return self.get_token(**data) except TypeError as exc: self._handle_exception(exc) # Catch missing parameters in request return self._make_json_error_response("invalid_request") except StandardError as exc: self._handle_exception(exc) # Catch all other server errors return self._make_json_error_response("server_error") def validate_client_id(self, client_id): raise NotImplementedError("Subclasses must implement " "validate_client_id.") def validate_client_secret(self, client_id, client_secret): raise NotImplementedError("Subclasses must implement " "validate_client_secret.") def validate_redirect_uri(self, client_id, redirect_uri): raise NotImplementedError("Subclasses must implement " "validate_redirect_uri.") def validate_scope(self, client_id, scope): raise NotImplementedError("Subclasses must implement " "validate_scope.") def validate_access(self): raise NotImplementedError("Subclasses must implement " "validate_access.") def from_authorization_code(self, client_id, code, scope): raise NotImplementedError("Subclasses must implement " "from_authorization_code.") def from_refresh_token(self, client_id, refresh_token, scope): raise NotImplementedError("Subclasses must implement " "from_refresh_token.") def persist_authorization_code(self, client_id, code, scope): raise NotImplementedError("Subclasses must implement " "persist_authorization_code.") def persist_token_information( self, client_id, scope, access_token, token_type, expires_in, refresh_token, data ): raise NotImplementedError("Subclasses must implement " "persist_token_information.") def discard_authorization_code(self, client_id, code): raise NotImplementedError("Subclasses must implement " "discard_authorization_code.") def discard_refresh_token(self, client_id, refresh_token): raise NotImplementedError("Subclasses must implement " "discard_refresh_token.") class OAuthError(Unauthorized): """OAuth error, including the OAuth error reason.""" def __init__(self, reason, *args, **kwargs): self.reason = reason super(OAuthError, self).__init__(*args, **kwargs) class ResourceAuthorization(object): """A class containing an OAuth 2.0 authorization.""" is_oauth = False is_valid = None token = None client_id = None expires_in = None error = None def raise_error_if_invalid(self): if not self.is_valid: raise OAuthError(self.error, "OAuth authorization error") class ResourceProvider(Provider): """OAuth 2.0 resource provider. This class provides an interface to validate an incoming request and authenticate resource access. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a resource provider. These are the methods that must be implemented in a subclass: get_authorization_header(self) # Return header string for key "Authorization" or None validate_access_token(self, access_token, authorization) # Set is_valid=True, client_id, and expires_in attributes # on authorization if authorization was successful. 
# Return value is ignored """ @property def authorization_class(self): return ResourceAuthorization def get_authorization(self): """Get authorization object representing status of authentication.""" auth = self.authorization_class() header = self.get_authorization_header() if not header or not header.split: return auth header = header.split() if len(header) > 1 and header[0] == "Bearer": auth.is_oauth = True access_token = header[1] self.validate_access_token(access_token, auth) if not auth.is_valid: auth.error = "access_denied" return auth def get_authorization_header(self): raise NotImplementedError("Subclasses must implement " "get_authorization_header.") def validate_access_token(self, access_token, authorization): raise NotImplementedError("Subclasses must implement " "validate_token.")
35.730496
129
0.654575
2,366
20,152
5.319527
0.105664
0.04513
0.025584
0.020022
0.622755
0.592404
0.517798
0.472906
0.411727
0.39679
0
0.005184
0.272529
20,152
563
130
35.793961
0.853342
0.323839
0
0.403162
0
0
0.126022
0.023411
0
0
0
0
0
1
0.134387
false
0
0.027668
0.003953
0.355731
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a811562ddff805b40048018c138048e412a8c98
773
py
Python
main.py
TomHacker/ImageCluster
c4262e08a61c50b6d850ba29bc4d56d21c789aa9
[ "Apache-2.0" ]
10
2019-04-08T06:46:35.000Z
2019-10-31T11:10:32.000Z
main.py
HandsomeBrotherShuaiLi/ImageCluster
c4262e08a61c50b6d850ba29bc4d56d21c789aa9
[ "Apache-2.0" ]
3
2020-06-02T01:24:18.000Z
2021-05-20T04:53:26.000Z
main.py
HandsomeBrotherShuaiLi/ImageCluster
c4262e08a61c50b6d850ba29bc4d56d21c789aa9
[ "Apache-2.0" ]
1
2019-05-23T11:08:04.000Z
2019-05-23T11:08:04.000Z
from model import ImageCluster

m = ImageCluster(
    base_model='vgg16',  # your feature map extractor model
    resorted_img_folder='resorted_data',  # the folder for clustered images
    cluster_algo='kmeans',  # cluster algorithm
    base_img_folder='data',
    maxK=150,  # the max k num; ImageCluster calculates every k in range(2, maxK + 1)
)
# calculate the feature maps
# m.get_feature_map(
#     resize_shape=(224, 224)  # (w, h) a tuple for resizing the input images to the same shape
# )
# # clustering for feature maps
# m.imagecluster()
# As we can see, 21 may be the best cluster number for this dataset.
# So, we can call the resorted_img function to label the images under different folders
m.resorted_img(
    selected_k_num=100  # an int number in range [2, maxK]
)
36.809524
95
0.750323
126
773
4.492063
0.587302
0.058304
0.028269
0
0
0
0
0
0
0
0
0.035714
0.166882
773
20
96
38.65
0.843168
0.679172
0
0
0
0
0.12069
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a82018dc0f7662911572e4ff805c96d468e9254
2,330
py
Python
JumpscaleCore/tools/executor/ExecutorSerial.py
gneumann333/jumpscaleX_core
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
[ "Apache-2.0" ]
1
2020-06-21T11:18:52.000Z
2020-06-21T11:18:52.000Z
JumpscaleCore/tools/executor/ExecutorSerial.py
gneumann333/jumpscaleX_core
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
[ "Apache-2.0" ]
644
2019-08-25T10:19:56.000Z
2020-12-23T09:41:04.000Z
JumpscaleCore/tools/executor/ExecutorSerial.py
gneumann333/jumpscaleX_core
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
[ "Apache-2.0" ]
11
2019-08-29T21:38:50.000Z
2020-06-21T11:18:55.000Z
from Jumpscale import j

JSBASE = j.baseclasses.object

from .ExecutorBase import *
import serial


class ExecutorSerial(ExecutorBase):
    """
    This executor is primarily made to communicate with devices (routers, switches, ...)
    over a console cable, but you can use the underlying methods to communicate with any
    serial device.

    Please note that the default mode attempts to recognize a device with Cisco-like commands.
    """

    def __init__(self, device, baudrate=9600, type="serial", parity="N", stopbits=1, bytesize=8, timeout=1):
        ExecutorBase.__init__(self, checkok=False)
        self.device = device
        self.baudrate = baudrate
        self.type = type
        self.parity = parity
        self.stopbits = stopbits
        self.bytesize = bytesize
        self.timeout = timeout

        self._id = None

        self._log_info("Initialized")

        self.reconnect()
        self.fetch()

    def reconnect(self):
        self.console = serial.Serial(
            port=self.device,
            baudrate=self.baudrate,
            parity=self.parity,
            stopbits=self.stopbits,
            bytesize=self.bytesize,
            timeout=self.timeout,
        )
        return True

    @property
    def id(self):
        if self._id is None:
            self._id = "serial.%s" % (self.device)
        return self._id

    def execute(self, cmds, die=True, checkok=None, showout=True, timeout=0, env={}):
        self._log_debug("Serial command: %s" % cmds)
        if not cmds.endswith("\n"):
            cmds += "\n"
        self.send(cmds)
        return 0, "", ""

    def send(self, data):
        self.console.write(data.encode("utf-8"))

    def fetch(self):
        input = self.console.read_all()
        return input.decode("utf-8")

    def enter(self, command):
        self.send(command)
        self.send("\n")

    def _execute_script(self, content="", die=True, showout=True, checkok=None):
        raise j.exceptions.NotImplemented()

    def upload(self, source, dest, dest_prefix="", recursive=True, createdir=True):
        raise j.exceptions.NotImplemented()

    def download(self, source, dest, source_prefix="", recursive=True):
        raise j.exceptions.NotImplemented()

    def __repr__(self):
        return "Executor serial: %s" % (self.device)

    __str__ = __repr__
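A minimal usage sketch for the executor above. This is an assumption-laden illustration: the device path, baud rate, and command are placeholders, and it presumes ExecutorBase and the Jumpscale runtime are importable in the environment.

# Hypothetical usage sketch; '/dev/ttyUSB0' and 'show version' are illustrative only.
ex = ExecutorSerial("/dev/ttyUSB0", baudrate=115200, timeout=2)
ex.execute("show version")   # appends a newline and writes the command to the serial port
print(ex.fetch())            # read back whatever the device has sent so far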
27.093023
108
0.615021
274
2,330
5.109489
0.386861
0.035714
0.034286
0.064286
0.076429
0.052857
0
0
0
0
0
0.006478
0.271245
2,330
85
109
27.411765
0.818021
0.111588
0
0.053571
0
0
0.039101
0
0
0
0
0
0
1
0.196429
false
0
0.053571
0.017857
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a83ea727e6668f4f022e77a641fbd9d212a22e3
8,749
py
Python
feed/serializers/extensions.py
cul-it/arxiv-rss
40c0e859528119cc8ba3700312cb8df095d95cdd
[ "MIT" ]
4
2020-06-29T15:05:37.000Z
2022-02-02T10:28:28.000Z
feed/serializers/extensions.py
arXiv/arxiv-feed
82923d062e2524df94c22490cf936a988559ce66
[ "MIT" ]
12
2020-03-06T16:45:00.000Z
2022-03-02T15:36:14.000Z
feed/serializers/extensions.py
cul-it/arxiv-rss
40c0e859528119cc8ba3700312cb8df095d95cdd
[ "MIT" ]
2
2020-12-06T16:30:06.000Z
2021-11-05T12:29:08.000Z
"""Classes derived from the Feedgen extension classes.""" from typing import Dict, List, Optional from lxml import etree from lxml.etree import Element from flask import current_app from feedgen.ext.base import BaseEntryExtension, BaseExtension from feed.domain import Author, Media class ArxivExtension(BaseExtension): """Extension of the Feedgen class to allow us to change its behavior.""" def extend_atom(self: BaseExtension, atom_feed: Element) -> Element: """Allow the extension to modify the initial feed tree for Atom. Parameters ---------- atom_feed : Element The feed's root element. Returns ------- atom_feed : Element The feed's root element. """ return atom_feed def extend_rss(self: BaseExtension, rss_feed: Element) -> Element: """Allow the extension to modify the initial feed tree for RSS. Parameters ---------- rss_feed : Element The feed's root element. Returns ------- rss_feed : Element The feed's root element. """ return rss_feed def extend_ns(self: BaseExtension) -> Dict[str, str]: """ Define the feed's namespaces. Returns ------- namespaces : Dict[str, str] Definitions of the "arxiv" namespaces. """ return { "arxiv": "http://arxiv.org/schemas/atom", "content": "http://purl.org/rss/1.0/modules/content/", "taxo": "http://purl.org/rss/1.0/modules/taxonomy/", "syn": "http://purl.org/rss/1.0/modules/syndication/", "admin": "http://webns.net/mvcb/", "media": "http://search.yahoo.com/mrss", } class ArxivAtomExtension(BaseEntryExtension): """Atom only extension.""" def extend_ns(self: BaseExtension) -> Dict[str, str]: """ Define the feed's namespaces. Returns ------- namespaces : Dict[str, str] Definitions of the "arxiv" namespaces. """ return { "arxiv": "http://arxiv.org/schemas/atom", } class ArxivEntryExtension(BaseEntryExtension): """Extension of the Entry class to allow us to change its behavior.""" def __init__(self: BaseEntryExtension): """Initialize the member values to all be empty.""" self.__arxiv_authors: List[Author] = [] self.__arxiv_media: List[Media] = [] self.__arxiv_comment: Optional[str] = None self.__arxiv_primary_category: Optional[str] = None self.__arxiv_doi: Optional[dict] = None self.__arxiv_affiliation: Optional[str] = None self.__arxiv_journal_ref: Optional[str] = None self.__arxiv_affiliations: Dict = {} def __add_media(self, entry: Element) -> None: for media in self.__arxiv_media: group = etree.SubElement( entry, "{http://search.yahoo.com/mrss}group" ) title = etree.SubElement( group, "{http://search.yahoo.com/mrss}title" ) title.text = media.title etree.SubElement( group, "{http://search.yahoo.com/mrss}content", attrib={"url": media.url, "type": media.type}, ) def extend_atom(self, entry: Element) -> Element: """ Allow the extension to modify the entry element for Atom serialization. Parameters ---------- entry : Element The FeedEntry to modify. Returns ------- entry : Element The modified entry. 
""" if self.__arxiv_comment: comment_element = etree.SubElement( entry, "{http://arxiv.org/schemas/atom}comment" ) comment_element.text = self.__arxiv_comment if self.__arxiv_primary_category: etree.SubElement( entry, "{http://arxiv.org/schemas/atom}primary_category", attrib=self.__arxiv_primary_category, ) if self.__arxiv_journal_ref: journal_ref_element = etree.SubElement( entry, "{http://arxiv.org/schemas/atom}journal_ref" ) journal_ref_element.text = self.__arxiv_journal_ref if self.__arxiv_doi: for doi in self.__arxiv_doi: doi_element = etree.SubElement( entry, "{http://arxiv.org/schemas/atom}doi" ) doi_element.text = doi # Check each of the entry's author nodes for entry_child in entry: if entry_child.tag == "author": author = entry_child for author_child in author: # If the author's name is in the affiliation dictionary, # add Elements for all of its affiliations. if author_child.tag == "name": name = author_child.text affiliations = self.__arxiv_affiliations.get(name, []) for affiliation in affiliations: element = etree.SubElement( author, "{http://arxiv.org/schemas/atom}affiliation", ) element.text = affiliation self.__add_media(entry=entry) return entry def extend_rss(self, entry: Element) -> Element: """Allow the extension to modify the entry element for RSS. Parameters ---------- entry : Element The FeedEntry to modify. Returns ------- entry : Element The modified entry. """ base_server: str = current_app.config["BASE_SERVER"] for entry_child in entry: if entry_child.tag == "description": description = "<p>Authors: " first = True for author in self.__arxiv_authors: if first: first = False else: description += ", " name = ( f"{author.last_name}," f"+{author.initials.replace(' ', '+')}" ) description += ( f'<a href="http://{base_server}/search/?query={name}&' f'searchtype=author">{author.full_name}</a>' ) description += f"</p><p>{entry_child.text}</p>" entry_child.text = description self.__add_media(entry=entry) return entry def author(self, author: Author) -> None: """Add an author value to this entry. Parameters ---------- author : Author Paper author. """ self.__arxiv_authors.append(author) def media(self, media: Media) -> None: """Add a media item. Parameters ---------- media: Dict[str, str] Dictionary with url and type attributes. """ self.__arxiv_media.append(media) def comment(self, text: str) -> None: """Assign the comment value to this entry. Parameters ---------- text : str The new comment text. """ self.__arxiv_comment = text def primary_category(self, text: str) -> None: """Assign the primary_category value to this entry. Parameters ---------- text : str The new primary_category name. """ self.__arxiv_primary_category = text def journal_ref(self, text: str) -> None: """Assign the journal_ref value to this entry. Parameters ---------- text : str The new journal_ref value. """ self.__arxiv_journal_ref = text def doi(self, doi_list: Dict[str, str]) -> None: """Assign the set of DOI definitions for this entry. Parameters ---------- doi_list : Dict[str, str] A dictionary of DOI assignments. """ self.__arxiv_doi = doi_list def affiliation(self, full_name: str, affiliations: List[str]) -> None: """Assign an affiliation for one author of this entry. Parameters ---------- full_name : str An author's full name. affiliations : List[str] The code for the author's affiliated institution. """ self.__arxiv_affiliations[full_name] = affiliations
30.590909
79
0.529318
890
8,749
5.032584
0.170787
0.052244
0.015628
0.029694
0.426658
0.366376
0.350301
0.334896
0.273499
0.161197
0
0.001076
0.362899
8,749
285
80
30.698246
0.802476
0.258201
0
0.112903
0
0
0.141677
0.01705
0
0
0
0
0
1
0.120968
false
0
0.048387
0
0.241935
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a849b7bcd124ad715f2ce345cebb1f79d3397f0
1,132
py
Python
discovery-infra/test_infra/helper_classes/config/controller_config.py
lranjbar/assisted-test-infra
89cd4e16744afa646af88975f8038ca1774bcfa4
[ "Apache-2.0" ]
null
null
null
discovery-infra/test_infra/helper_classes/config/controller_config.py
lranjbar/assisted-test-infra
89cd4e16744afa646af88975f8038ca1774bcfa4
[ "Apache-2.0" ]
30
2021-11-15T07:10:49.000Z
2022-03-28T07:10:26.000Z
discovery-infra/test_infra/helper_classes/config/controller_config.py
lranjbar/assisted-test-infra
89cd4e16744afa646af88975f8038ca1774bcfa4
[ "Apache-2.0" ]
null
null
null
from abc import ABC
from pathlib import Path
from typing import Any
from dataclasses import dataclass

from test_infra import consts
from test_infra.utils.global_variables import GlobalVariables
from .base_config import _BaseConfig

global_variables = GlobalVariables()


@dataclass
class BaseNodeConfig(_BaseConfig, ABC):
    platform: str = None
    is_ipv6: bool = None
    bootstrap_in_place: bool = None
    private_ssh_key_path: Path = None
    working_dir: str = consts.WORKING_DIR

    master_memory: int = None
    master_vcpu: int = None
    masters_count: int = None
    nodes_count: int = None
    master_cpu_mode: str = None
    master_disk: int = None  # disk size in MB.
    master_disk_size_gib: str = None  # disk size in GB.
    master_disk_count: int = None  # number of disks to create

    worker_memory: int = None
    worker_vcpu: int = None
    workers_count: int = None
    worker_cpu_mode: str = None
    worker_disk: int = None
    worker_disk_count: int = None

    network_mtu: int = None

    @staticmethod
    def get_default(key, default=None) -> Any:
        return getattr(global_variables, key)
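A hypothetical sketch of how such a dataclass config might be specialized. The subclass name and values are illustrative, and it assumes _BaseConfig leaves no abstract methods unimplemented so the class can be instantiated.

# Hypothetical subclass; values are illustrative only.
@dataclass
class SmallClusterConfig(BaseNodeConfig):
    masters_count: int = 3
    workers_count: int = 2
    master_memory: int = 16384  # MiB, assumed unit
    worker_memory: int = 8192   # MiB, assumed unit

cfg = SmallClusterConfig()
print(cfg.masters_count, cfg.working_dir)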
26.325581
63
0.719965
158
1,132
4.917722
0.405063
0.108108
0.07722
0.036036
0
0
0
0
0
0
0
0.001133
0.219965
1,132
42
64
26.952381
0.878822
0.05212
0
0
0
0
0
0
0
0
0
0
0
1
0.030303
false
0
0.212121
0.030303
0.909091
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a84b5159878bc48cef9594078edc989fb798f13
952
py
Python
bitcoinpy/mempool.py
obulpathi/bitcoinpy
8f41e0221f2ff2d35697b6d4e5397deb7de09c3d
[ "MIT" ]
21
2016-01-03T14:52:07.000Z
2021-08-09T18:05:08.000Z
bitcoinpy/mempool.py
obulpathi/bitcoinpy
8f41e0221f2ff2d35697b6d4e5397deb7de09c3d
[ "MIT" ]
null
null
null
bitcoinpy/mempool.py
obulpathi/bitcoinpy
8f41e0221f2ff2d35697b6d4e5397deb7de09c3d
[ "MIT" ]
15
2015-02-07T20:08:11.000Z
2019-10-03T04:45:45.000Z
# MemPool.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

import logging

from lib.serialize import uint256_to_shortstr


class MemPool(object):
    def __init__(self):
        self.pool = {}

        # setup logging
        logging.basicConfig(level=logging.DEBUG)
        self.logger = logging.getLogger(__name__)

    def add(self, tx):
        tx.calc_sha256()
        hash = tx.sha256
        hashstr = uint256_to_shortstr(hash)

        if hash in self.pool:
            self.logger.debug("MemPool.add(%s): already known" % (hashstr,))
            return False
        if not tx.is_valid():
            self.logger.debug("MemPool.add(%s): invalid TX" % (hashstr,))
            return False

        self.pool[hash] = tx
        self.logger.debug("MemPool.add(%s), poolsz %d" % (hashstr, len(self.pool)))
        return True

    def remove(self, hash):
        if hash not in self.pool:
            return False

        del self.pool[hash]
        return True

    def size(self):
        return len(self.pool)
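A minimal usage sketch for the pool above. CTransaction is a hypothetical stand-in for whatever transaction type in this codebase provides calc_sha256(), a sha256 attribute, and is_valid(); it is not defined in this file.

# Hypothetical usage; CTransaction is an assumed transaction type, not part of this module.
pool = MemPool()
tx = CTransaction()
if pool.add(tx):                 # hashes and validates the tx, then stores it by hash
    print("mempool size:", pool.size())
pool.remove(tx.sha256)           # drop it again by hash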
23.8
74
0.703782
142
952
4.619718
0.471831
0.085366
0.054878
0.08689
0.105183
0.105183
0
0
0
0
0
0.01761
0.164916
952
39
75
24.410256
0.807547
0.169118
0
0.185185
0
0
0.105732
0
0
0
0
0
0
1
0.148148
false
0
0.074074
0.037037
0.481481
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a84b6d6b4154a68e4e0a9485928b636bf10d1b0
13,530
py
Python
bashspy/parser.py
sarvi/bashspy
088f2cdfd00d29b4c7a98ec311f6e6c382ba4749
[ "MIT" ]
null
null
null
bashspy/parser.py
sarvi/bashspy
088f2cdfd00d29b4c7a98ec311f6e6c382ba4749
[ "MIT" ]
1
2021-06-12T12:47:44.000Z
2021-06-12T12:47:44.000Z
bashspy/parser.py
sarvi/bashspy
088f2cdfd00d29b4c7a98ec311f6e6c382ba4749
[ "MIT" ]
1
2020-05-18T08:55:14.000Z
2020-05-18T08:55:14.000Z
''' Created on Jun 13, 2019 @author: sarvi ''' from sly import Parser from .lexer import BashLexer class ASTCommands(list): __slots__ = ('grouping') def __init__(self, command, grouping=None): self.append(command) self.grouping = grouping def __repr__(self): x=[str(i) for i in self] if self.grouping: x.insert(0, self.grouping[0]) x.append(self.grouping[1]) return '\n'.join(x) class ASTCommand: __slots__ = ('assignments', 'executable', 'arguments', 'redirections', 'pipetocmd') def __init__(self, executable=None, assignments=None, arguments=None, redirections=None, pipetocmd=None): self.executable = executable self.assignments = assignments or list() self.arguments = arguments or list() self.redirections = redirections or list() self.pipetocmd = pipetocmd def __repr__(self): if self.executable: return ('%s %s %s %s %s' % (' '.join([str(i) for i in self.assignments]), self.executable, ' '.join([str(i) for i in self.arguments]), ' '.join([str(i) for i in self.redirections]), '| %s'%self.pipetocmd if self.pipetocmd else '')).strip() else: return ' '.join([str(i) for i in self.assignments]) class ASTAssignment: __slots__ = ('variable', 'assignop', 'value') def __init__(self, variable, assignop, value=None): self.variable = variable self.assignop = assignop self.value = value def __repr__(self): return '%s%s%s'%(self.variable, self.assignop, self.value or '') class ASTArgument: __slots__ = ('option', 'value') def __init__(self, option=None, value=None): self.option = option self.value = value def __repr__(self): return '%s=%s'%(self.option, self.value) if self.option and self.value else (self.option or self.value) class ASTRedirection: __slots__ = ('redirect', 'file') def __init__(self, redirect, file): self.redirect = redirect self.file = file def __repr__(self): return '%s%s'%(self.redirect, self.file) if self.file else '%s'%(self.redirect) class ASTTestCombination: __slots__ = ('leftexpr', 'combination', 'rightexpr', 'test_command', 'group') def __init__(self, combination, rightexpr, leftexpr=None, test_command=False, group=False): self.combination = combination self.rightexpr = rightexpr self.leftexpr = leftexpr self.test_command = test_command self.group = group def __repr__(self): if self.leftexpr: return '%s %s %s'%(self.leftexpr, self.combination, self.rightexpr) elif self.combination: return '%s %s'%(self.combination, self.rightexpr) elif self.test_command: return '[ %s ]'%(self.rightexpr) elif self.group: return '( %s )'%(self.rightexpr) else: return '%s'%(self.rightexpr) class ASTTestCondition: __slots__ = ('leftvalue', 'test', 'rightvalue') def __init__(self, test, rightvalue, leftvalue=None): self.test = test self.leftvalue = leftvalue self.rightvalue = rightvalue def __repr__(self): if self.test: return '%s %s %s'%(self.leftvalue, self.test, self.rightvalue) if self.leftvalue else '%s %s'%(self.test, self.rightvalue) else: return '%s' % (self.rightvalue) class ASTIfCommand: __slots__ = ('test_commands', 'then_commands', 'else_commands') def __init__(self, test_commands, then_commands, else_commands=None): self.test_commands = test_commands self.then_commands = then_commands self.else_commands = else_commands def __repr__(self): if self.else_commands: return 'if %s; then\n%s\nelse\n%s\nfi' % (self.test_commands, self.then_commands, self.else_commands) else: return 'if %s; then\n%s\nfi' % (self.test_commands, self.then_commands) class BashParser(Parser): # Get the token list from the lexer (required) debugfile = 'parser.out' tokens = BashLexer.tokens precedence = ( # ('nonassoc', 
BOOL_NOT), # ('nonassoc', BOOL_LESS, BOOL_GREATER, BOOL_EQ, BOOL_NEQ), # Nonassociative operators ('left', LIST_COMMANDS), ('left', AMPERSAND, CMDSEP, NEWLINE), ('left', BOOL_COMBINATION), ('left', BOOL_COMPARISON), ('right', BOOL_NOT), # ('right', END_LINE) ) # Grammar rules and actions @_('compound_commands') def program(self, p): print('program(%s)' % (p.compound_commands)) return p.compound_commands @_('compound_command', 'compound_command end_command', 'compound_command end_command compound_commands' ) def compound_commands(self, p): # print('simple_command(%s)' % (list(p))) if getattr(p, 'compound_commands', None): p.compound_commands.insert(0, p.compound_command) return p.compound_commands else: return ASTCommands(p.compound_command) @_( 'group_command', 'list_commands', 'if_command', ) def compound_command(self, p): return p[0] @_( 'LBRACE NEWLINE compound_commands RBRACE', 'LBRACE compound_commands RBRACE', 'LPAREN compound_commands RPAREN', ) def group_command(self, p): if getattr(p, 'LBRACE', None): p.compound_commands.grouping = '{}' elif getattr(p, 'LPAREN', None): p.compound_commands.grouping = '()' return getattr(p, 'compound_commands', None) @_('pipe_command %prec LIST_COMMANDS', 'pipe_command end_pipe', 'pipe_command end_pipe list_commands', 'pipe_command boolean_combination list_commands') def list_commands(self, p): if getattr(p, 'boolean_combination', None): return ASTTestCombination(p.boolean_combination, p.list_commands, p.pipe_command) elif getattr(p, 'list_commands', None): p.list_commands.insert(0, p.pipe_command) return p.list_commands else: return ASTCommands(p.pipe_command) @_('NEWLINE', 'CMDSEP', 'AMPERSAND') def end_pipe(self, p): return None @_('NEWLINE', 'CMDSEP') def end_command(self, p): return None @_('IF list_commands THEN compound_commands FI', 'IF list_commands THEN NEWLINE compound_commands FI', 'IF list_commands THEN compound_commands ELSE compound_commands FI', 'IF list_commands THEN NEWLINE compound_commands ELSE NEWLINE compound_commands FI') def if_command(self, p): if getattr(p, 'ELSE', None): return ASTIfCommand(p.list_commands, p.compound_commands0, p.compound_commands1) else: return ASTIfCommand(p.list_commands, p.compound_commands) # @_( #'test_command', # 'command_pipe', # # 'test_command boolean_combination compound_command', # # 'command_pipe boolean_combination compound_command' # ) # def compound_command(self, p): # if getattr(p, 'boolean_combination', None): # return ASTTestCombination(p.boolean_combination, p.test_commands, p.test_command) # else: # return p.test_command @_('time_command pipe_commands', 'time_command BOOL_NOT pipe_commands', 'pipe_commands', 'BOOL_NOT pipe_commands') def pipe_command(self, p): # print('simple_command(%s)' % (list(p))) cmd = p.pipe_commands if getattr(p, 'BOOL_NOT', None): cmd = ASTTestCombination(p.BOOL_NOT, p.pipe_commands) return cmd @_('TIME', 'TIME TIME_OPTP') def time_command(self, p): cmd = ASTCommand(p.TIME) if getattr(p, 'TIME_OPTP', None): cmd.arguments = [p.TIME_OPTP] return cmd @_('simple_command', 'simple_command PIPE pipe_commands') def pipe_commands(self, p): # print('simple_command(%s)' % (list(p))) if getattr(p, 'PIPE', None): p.simple_command.pipetocmd = p.pipe_commands return p.simple_command @_('assignments', 'base_command', 'assignments base_command', 'base_command redirects', 'assignments base_command redirects') def simple_command(self, p): # print('simple_command(%s)' % (list(p))) cmd = p.base_command if getattr(p, 'base_command', None) else ASTCommand() if getattr(p, 
'redirects', None): cmd.redirections = p.redirects if getattr(p, 'assignments', None): cmd.assignments = p.assignments return cmd @_('redirect', 'redirect redirects') def redirects(self, p): return [p.redirect] if len(p)==1 else [p.redirect] + p.redirects @_('REDIRECT', 'REDIRECT WORD') def redirect(self, p): # print('assignment(%s)' % (list(p))) return ASTRedirection(p.REDIRECT, getattr(p, 'WORD', None)) @_('echo_command', 'exec_command', 'test_command') def base_command(self, p): if len(p)==2: p[1].assignments = p.assignments.assignments return p[1] else: return p[0] @_('LBRACK test_expressions RBRACK', 'LDBRACK test_expressions RDBRACK') def test_command(self, p): if getattr(p, 'BOOL_NOT', None): return ASTTestCombination(p.BOOL_NOT, p.command_pipe) elif getattr(p, 'command_pipe', None): return ASTTestCombination(None, p.command_pipe) else: return ASTTestCombination(None, p.test_expressions, test_command=True) @_('test_expression', 'LPAREN test_expressions RPAREN', 'BOOL_NOT test_expressions %prec BOOL_NOT', 'test_expressions boolean_combination test_expressions %prec BOOL_COMBINATION' ) def test_expressions(self, p): if getattr(p, 'BOOL_NOT', None): return ASTTestCombination(p.BOOL_NOT, p.test_expressions) elif getattr(p, 'boolean_combination', None): return ASTTestCombination(p.boolean_combination, p.test_expressions1, p.test_expressions0) elif getattr(p, 'LPAREN', None): return ASTTestCombination(None, p.test_expressions, group=True) else: return p.test_expression @_('BOOL_OR', 'BOOL_AND') def boolean_combination(self, p): return p[0] @_('value boolean_comparison value %prec BOOL_COMPARISON', 'OPTION value') def test_expression(self, p): if getattr(p, 'BOOL_NOT', None): return ASTTestCombination(p.BOOL_NOT, p.test_expression) elif getattr(p, 'LPAREN', None): return ASTTestCombination(None, p.test_expressions, group=True) elif getattr(p, 'OPTION', None): return ASTTestCondition(p.boolean_comparison, p.value) else: return ASTTestCondition(p.boolean_comparison, p.value1, p.value0) @_('OPTION', 'BOOL_EQ', 'BOOL_NEQ', 'BOOL_LESS', 'BOOL_GREATER', 'ASSIGN') def boolean_comparison(self, p): return p[0] # @_( # 'for_command', # 'case_command', # 'WHILE compound_list DO compound_list DONE', # 'UNTIL compound_list DO compound_list DONE', # 'select_command', # 'if_command', # 'subshell', # 'group_command', # 'arith_command' # 'cond_command', # 'arith_for_command' # ) # def shell_command(self, p): # print('assignments(%s)' % (list(p))) # return list(p) @_('ECHO ECHO_STRING') def echo_command(self, p): return ASTCommand(p[0], None, [p[1]]) @_('WORD', 'WORD arguments') def exec_command(self, p): return ASTCommand(p[0], None, getattr(p, 'arguments', None), getattr(p, 'redirects', None)) @_('argument', 'argument arguments') def arguments(self, p): return [p.argument] if len(p)==1 else [p.argument] + p.arguments @_('OPTION ASSIGN', 'OPTION', 'arg_value') def argument(self, p): # print('assignment(%s)' % (list(p))) return ASTArgument(getattr(p, 'OPTION', None), getattr(p, 'arg_value', None)) @_('value', 'WORD') def arg_value(self, p): # print('value(%s)' % (list(p))) return p[0] @_('assignment', 'assignment assignments') def assignments(self, p): return [p.assignment] if len(p) == 1 else [p.assignment] + p.assignments @_('LET ID assignop value', 'ID assignop value', 'ID assignop') def assignment(self, p): # print('assignment(%s)' % (list(p))) return ASTAssignment(p.ID, p.assignop, getattr(p, 'value', None)) @_('ASSIGN', 'ARITH_ASSIGN') def assignop(self, p): return p[0] @_('QSTRING', 'DQSTRING', 
'BTQUOTED', 'CMD_EXP', 'VAL_STRING', 'VAR_SUBST', 'VARIABLE') def value(self, p): # print('value(%s)' % (list(p))) return p[0] if __name__ == '__main__': lexer = BashLexer() parser = BashParser() while True: try: text = input('Command:>') result = parser.parse(lexer.tokenize(text)) print(result) except EOFError: break
32.760291
135
0.602882
1,539
13,530
5.069526
0.11306
0.019867
0.017944
0.012689
0.334273
0.27557
0.187644
0.166752
0.138298
0.120354
0
0.00334
0.269771
13,530
413
136
32.760291
0.786336
0.111382
0
0.156997
0
0
0.188424
0.001837
0
0
0
0
0
1
0.153584
false
0
0.006826
0.064846
0.423208
0.006826
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a84df704829c829063b750bd4cbc4f7f7261e8a
1,555
py
Python
Module03/pregnancy_wheel.py
biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-charrison620
4f53e9290b456f582464c86d114c794c1448b995
[ "MIT" ]
null
null
null
Module03/pregnancy_wheel.py
biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-charrison620
4f53e9290b456f582464c86d114c794c1448b995
[ "MIT" ]
null
null
null
Module03/pregnancy_wheel.py
biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-charrison620
4f53e9290b456f582464c86d114c794c1448b995
[ "MIT" ]
null
null
null
import datetime


def print_header():
    print('----------------------------')
    print('        Due Date APP        ')
    print('----------------------------')
    print()


def get_lmp_from_patient():
    print("When was the patient's last normal menstrual cycle? ")
    date_str = input('Format: [dd/mm/yyyy] ')  # e.g. '05/06/2018'

    parts = date_str.split('/')
    if len(parts) != 3:
        print('Bad date found', date_str)
        return get_lmp_from_patient()

    year = int(parts[2])
    month = int(parts[1])
    day = int(parts[0])
    lmp = datetime.date(year, month, day)
    # print(lmp)
    return lmp


# avg pregnancy length is 281 days
def compute_days_between_dates(original_date, target_date):
    this_year = datetime.date(target_date.year, original_date.month, original_date.day)
    dt = this_year - target_date
    return dt.days


def print_due_date_information(min_due_date, max_due_date, expected_due_date):
    print('Your expected due date is ', expected_due_date.strftime('%a %b %d %Y'))
    print('But it may be as early as ', min_due_date.strftime('%m/%d/%Y'))
    print('But as late as ', max_due_date.strftime('%m/%d/%Y'))


def main():
    print_header()
    lmp_day = get_lmp_from_patient()
    gest_length = datetime.timedelta(days=281)
    gest_std = datetime.timedelta(days=13)

    expected_due_date = lmp_day + gest_length
    min_due_date = expected_due_date - gest_std
    max_due_date = expected_due_date + gest_std

    print_due_date_information(min_due_date, max_due_date, expected_due_date)


main()
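A worked example of the date arithmetic above, with an illustrative LMP date: the expected due date is LMP + 281 days, and the window is that date plus or minus 13 days.

# Illustrative dates only; mirrors the arithmetic in main() without the input() prompt.
import datetime
lmp = datetime.date(2018, 6, 5)
expected = lmp + datetime.timedelta(days=281)    # 2019-03-13
earliest = expected - datetime.timedelta(days=13)
latest = expected + datetime.timedelta(days=13)
print(expected, earliest, latest)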
27.767857
87
0.65209
228
1,555
4.157895
0.350877
0.132911
0.110759
0.075949
0.224684
0.224684
0.183544
0.122363
0.122363
0.122363
0
0.016
0.196141
1,555
56
88
27.767857
0.7424
0.034727
0
0.055556
0
0
0.176785
0.037358
0
0
0
0
0
1
0.138889
false
0
0.027778
0
0.25
0.361111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a86a5adbb68bba6dc2b067c07b59de722a8d5ca
1,940
py
Python
tasks/storm_raffle_handler.py
Ayouuuu/bili2.0
1108e39208e56f129fb5eb6605a5b3f1aadc0d8f
[ "MIT" ]
2
2020-01-03T09:27:53.000Z
2020-04-07T05:06:36.000Z
tasks/storm_raffle_handler.py
Ayouuuu/bili2.0
1108e39208e56f129fb5eb6605a5b3f1aadc0d8f
[ "MIT" ]
null
null
null
tasks/storm_raffle_handler.py
Ayouuuu/bili2.0
1108e39208e56f129fb5eb6605a5b3f1aadc0d8f
[ "MIT" ]
1
2019-08-23T07:43:21.000Z
2019-08-23T07:43:21.000Z
import bili_statistics
from reqs.storm_raffle_handler import StormRaffleHandlerReq
from tasks.utils import UtilsTask
from .base_class import Forced, DontWait, Multi


class StormRaffleJoinTask(Forced, DontWait, Multi):
    TASK_NAME = 'join_storm_raffle'

    # For speed, the check sometimes joins without waiting for room_id verification:
    # room_id is set to 0, and is_normal_room then simply returns the fixed value True.
    @staticmethod
    async def check(user, room_id, raffle_id=None):
        if not await UtilsTask.is_normal_room(user, room_id):
            return
        if raffle_id is not None:
            json_rsp = {'data': {'id': raffle_id}}
        else:
            json_rsp = await user.req_s(StormRaffleHandlerReq.check, user, room_id)
        next_step_settings = []
        data = json_rsp['data']
        if data:
            raffle_id = int(data['id'])
            if not bili_statistics.is_raffleid_duplicate(raffle_id / 1000000):
                user.info(f'确认获取到飓风暴抽奖 {raffle_id}', with_userid=False)  # "confirmed storm raffle {raffle_id}"
                next_step_setting = (-2, (1, 3), room_id, raffle_id)
                next_step_settings.append(next_step_setting)
                next_step_setting = (-2, (2, 4), room_id, raffle_id)
                next_step_settings.append(next_step_setting)
                bili_statistics.add2raffle_ids(raffle_id / 1000000, 'STORM')
        return next_step_settings

    @staticmethod
    async def work(user, room_id, raffle_id):
        # await UtilsTask.enter_room(user, room_id)
        json_rsp = await user.req_s(StormRaffleHandlerReq.join, user, raffle_id)
        bili_statistics.add2joined_raffles('节奏风暴(合计)', user.id)  # "storm raffle (total)"
        if not json_rsp['code']:
            data = json_rsp['data']
            gift_name = data["gift_name"]
            gift_num = data["gift_num"]
            user.info(f'飓风暴({raffle_id})的参与结果: {gift_name}X{gift_num}')  # "result of joining storm {raffle_id}"
            bili_statistics.add2results(gift_name, user.id, gift_num)
            return
        print(json_rsp)
41.276596
83
0.635567
243
1,940
4.765432
0.337449
0.082902
0.043178
0.048359
0.189983
0.158895
0.158895
0.088083
0.088083
0.088083
0
0.016973
0.271134
1,940
46
84
42.173913
0.80198
0.052577
0
0.210526
0
0
0.073025
0.023978
0
0
0
0
0
1
0
false
0
0.105263
0
0.236842
0.026316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a86acff1cb947e60b02c94c6dbdcc5c7b79e9bf
4,767
py
Python
u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py
ALSM-PhD/quip_classification
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
[ "BSD-3-Clause" ]
45
2015-04-26T04:45:51.000Z
2022-01-24T15:03:55.000Z
u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py
ALSM-PhD/quip_classification
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
[ "BSD-3-Clause" ]
8
2018-07-20T20:54:51.000Z
2020-06-12T05:36:04.000Z
u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py
ALSM-PhD/quip_classification
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
[ "BSD-3-Clause" ]
22
2018-05-21T23:57:20.000Z
2022-02-21T00:48:32.000Z
import theano import theano.tensor as T import treeano import treeano.nodes as tn fX = theano.config.floatX @treeano.register_node("grad_net_interpolation") class GradNetInterpolationNode(treeano.NodeImpl): """ interpolates outputs between 2 nodes """ hyperparameter_names = ("late_gate",) children_container = treeano.core.DictChildrenContainerSchema( early=treeano.core.ChildContainer, late=treeano.core.ChildContainer, ) input_keys = ("early", "late") def init_state(self, network): children = self.raw_children() early = children["early"] late = children["late"] network.forward_input_to(early.name) network.forward_input_to(late.name) network.take_output_from(early.name, to_key="early") network.take_output_from(late.name, to_key="late") def compute_output(self, network, early_vw, late_vw): late_gate = network.find_hyperparameter(["late_gate"], 1) out_var = (early_vw.variable * (1 - late_gate) + late_vw.variable * late_gate) out_shape = [] assert early_vw.ndim == late_vw.ndim for e, l in zip(early_vw.shape, late_vw.shape): if e is None and l is None: out_shape.append(None) elif e is None: out_shape.append(l) elif l is None: out_shape.append(e) else: assert e == l out_shape.append(e) network.create_vw( "default", variable=out_var, shape=tuple(out_shape), tags={"output"}, ) @treeano.register_node("grad_net_optimizer_interpolation") class _GradNetOptimizerInterpolationNode(treeano.Wrapper1NodeImpl): hyperparameter_names = ("late_gate", "gradnet_epsilon", "epsilon", "multiplicative_inverse_for_early_gate") def init_state(self, network): super(_GradNetOptimizerInterpolationNode, self).init_state(network) epsilon = network.find_hyperparameter(["gradnet_epsilon", "epsilon"], 1e-3) late_gate = network.find_hyperparameter(["late_gate"], 1) late_gate = treeano.utils.as_fX(late_gate) # NOTE: late gate cannot be 0 because the early gate is divide by it # AND multiplied by it. Clipping only for the early gate will cause # no updates to occur. 
late_gate = T.clip(late_gate, epsilon, 1) use_multiplicative_inverse = network.find_hyperparameter( ["multiplicative_inverse_for_early_gate"], False) if use_multiplicative_inverse: early_gate = epsilon / late_gate else: early_gate = 1 - late_gate network.set_hyperparameter(self.name + "_late_update_scale", "update_scale_factor", late_gate) network.set_hyperparameter(self.name + "_early_update_scale", "update_scale_factor", # these updates are also multiplied by # late_gate later on, so rescale them early_gate / late_gate) def GradNetOptimizerInterpolationNode(name, children, early, late, **kwargs): """ interpolates updates from 2 optimizers nodes NOTE: this is a hack to take in node constructors as arguments """ assert set(children.keys()) == {"subtree", "cost"} subtree = children["subtree"] cost = children["cost"] cost_ref = tn.ReferenceNode(name + "_costref", reference=cost.name) late_subtree = tn.UpdateScaleNode(name + "_late_update_scale", subtree) late_node = late(name + "_late", {"subtree": late_subtree, "cost": cost}) early_subtree = tn.UpdateScaleNode(name + "_early_update_scale", late_node) early_node = early(name + "_early", {"subtree": early_subtree, "cost": cost_ref}) # NOTE: need separate node to forward hyperparameter return _GradNetOptimizerInterpolationNode(name, early_node, **kwargs) def GradualSimpleBatchNormalizationNode(name): from treeano.sandbox.nodes import batch_normalization as bn return GradNetInterpolationNode( name, {"early": bn.SimpleBatchNormalizationNode(name + "_bn"), "late": tn.IdentityNode(name + "_identity")}) GradualBNNode = GradualSimpleBatchNormalizationNode
36.389313
79
0.594084
489
4,767
5.548057
0.292434
0.053078
0.022116
0.015481
0.164394
0.075931
0.06045
0.030962
0
0
0
0.003387
0.318649
4,767
130
80
36.669231
0.831897
0.088945
0
0.10989
0
0
0.103184
0.029747
0
0
0
0
0.032967
1
0.054945
false
0
0.054945
0
0.197802
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a88dea060f9ea00f0bb7c465137c71904b3c14f
662
py
Python
minus80/RawFile.py
brohammer/Minus80
3cd5b61a7349b9fa6d35ed192d8a4f38523f92bb
[ "MIT" ]
null
null
null
minus80/RawFile.py
brohammer/Minus80
3cd5b61a7349b9fa6d35ed192d8a4f38523f92bb
[ "MIT" ]
null
null
null
minus80/RawFile.py
brohammer/Minus80
3cd5b61a7349b9fa6d35ed192d8a4f38523f92bb
[ "MIT" ]
null
null
null
import gzip   # pragma: no cover
import bz2    # pragma: no cover
import lzma   # pragma: no cover


class RawFile(object):  # pragma: no cover
    def __init__(self, filename):
        self.filename = filename
        if filename.endswith('.gz'):
            self.handle = gzip.open(filename, 'rt')
        elif filename.endswith('bz2'):
            self.handle = bz2.open(filename, 'rt')
        elif filename.endswith('xz'):
            self.handle = lzma.open(filename, 'rt')
        else:
            self.handle = open(filename, 'r')

    def __enter__(self):
        return self.handle

    def __exit__(self, dtype, value, traceback):
        self.handle.close()
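A short usage sketch: RawFile picks a decompressing opener from the file extension and yields the underlying handle when used as a context manager. The filename is illustrative.

# 'reads.fastq.gz' is a placeholder path; any .gz/.bz2/.xz or plain text file works.
with RawFile('reads.fastq.gz') as fh:
    for line in fh:
        print(line, end='')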
33.1
50
0.601208
79
662
4.886076
0.405063
0.15544
0.134715
0.098446
0.176166
0.176166
0
0
0
0
0
0.00625
0.274924
662
19
51
34.842105
0.797917
0.096677
0
0
0
0
0.025253
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0.055556
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6a89d65e11282b8c81495e2795c8364f65d2114c
4,724
py
Python
framework/database/__init__.py
fabmiz/osf.io
8d86af3f0a6e5388bd5b18383e68e27b65a66247
[ "Apache-2.0" ]
null
null
null
framework/database/__init__.py
fabmiz/osf.io
8d86af3f0a6e5388bd5b18383e68e27b65a66247
[ "Apache-2.0" ]
null
null
null
framework/database/__init__.py
fabmiz/osf.io
8d86af3f0a6e5388bd5b18383e68e27b65a66247
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

import functools
import httplib as http

import markupsafe
from django.core.paginator import Paginator
from django.db.models import Q, QuerySet

from framework.exceptions import HTTPError


def get_or_http_error(Model, pk_or_query, allow_deleted=False, display_name=None):
    """Load an instance of Model by primary key or modularodm.Q query. Raise an appropriate
    HTTPError if no record is found or if the query fails to find a unique record.

    :param type Model: StoredObject subclass to query
    :param pk_or_query:
    :type pk_or_query: either
      - a <basestring> representation of the record's primary key, e.g. 'abcdef'
      - a <QueryBase> subclass query to uniquely select a record, e.g.
        Q('title', 'eq', 'Entitled') & Q('version', 'eq', 1)
    :param bool allow_deleted: allow deleted records?
    :param basestring display_name:
    :raises: HTTPError(404) if the record does not exist
    :raises: HTTPError(400) if no unique record is found
    :raises: HTTPError(410) if the resource is deleted and allow_deleted = False
    :return: Model instance
    """
    display_name = display_name or ''
    # FIXME: Not everything that uses this decorator needs to be markupsafe, but OsfWebRenderer error.mako does...
    safe_name = markupsafe.escape(display_name)

    if isinstance(pk_or_query, Q):
        try:
            instance = Model.objects.get(pk_or_query)
        except Model.DoesNotExist:
            raise HTTPError(http.NOT_FOUND, data=dict(
                message_long='No {name} record matching that query could be found'.format(name=safe_name)
            ))
        except Model.MultipleObjectsReturned:
            raise HTTPError(http.BAD_REQUEST, data=dict(
                message_long='The query must match exactly one {name} record'.format(name=safe_name)
            ))
    else:
        instance = Model.load(pk_or_query)
        if not instance:
            raise HTTPError(http.NOT_FOUND, data=dict(
                message_long='No {name} record with that primary key could be found'.format(name=safe_name)
            ))
    if getattr(instance, 'is_deleted', False) and getattr(instance, 'suspended', False):
        raise HTTPError(451, data=dict(  # 451 - Unavailable For Legal Reasons
            message_short='Content removed',
            message_long='This content has been removed'
        ))
    if not allow_deleted and getattr(instance, 'is_deleted', False):
        raise HTTPError(http.GONE)
    return instance


def autoload(Model, extract_key, inject_key, func):
    """Decorator to autoload a StoredObject instance by primary key and inject into kwargs.
    Raises an appropriate HTTPError (see #get_or_http_error).

    :param type Model: database collection model to query (should be a subclass of StoredObject)
    :param basestring extract_key: named URL field containing the desired primary key to be
        fetched from the database
    :param basestring inject_key: name the instance will be accessible as when it's injected
        as an argument to the function

    Example usage: ::

        def get_node(node_id):
            node = Node.load(node_id)
            ...

    becomes ::

        import functools
        autoload_node = functools.partial(autoload, Node, 'node_id', 'node')

        @autoload_node
        def get_node(node):
            ...
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        primary_key = kwargs.get(extract_key)
        instance = get_or_http_error(Model, primary_key)
        kwargs[inject_key] = instance
        return func(*args, **kwargs)
    return wrapper


def paginated(model, query=None, increment=200, each=True, include=None):
    """Paginate a MODM query.

    :param StoredObject model: Model to query.
    :param Q query: Optional query object.
    :param int increment: Page size
    :param bool each: If True, each record is yielded. If False, pages are yielded.
    """
    if include and query:
        queryset = model.objects.filter(query).include(*include)
    elif query:
        queryset = model.objects.filter(query)
    else:
        queryset = model.objects.all()
    # Pagination requires an order by clause, especially when using Postgres.
    # see: https://docs.djangoproject.com/en/1.10/topics/pagination/#required-arguments
    if isinstance(queryset, QuerySet) and not queryset.ordered:
        queryset = queryset.order_by(queryset.model._meta.pk.name)
    paginator = Paginator(queryset.all(), increment)
    for page_num in paginator.page_range:
        page = paginator.page(page_num)
        if each:
            for item in page.object_list:
                yield item
        else:
            yield page.object_list
38.406504
114
0.67591
621
4,724
5.035427
0.334944
0.022386
0.017269
0.013431
0.10937
0.07867
0.055644
0.036457
0.036457
0.036457
0
0.006405
0.239839
4,724
122
115
38.721311
0.864383
0.425275
0
0.152542
0
0
0.087692
0
0
0
0
0.008197
0
1
0.067797
false
0
0.101695
0
0.220339
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0