| signature | implementation |
|---|---|
def set_proxy_bypass(domains, network_service="Ethernet"):
    '''Sets the domains that can bypass the proxy.

    domains
        An array of domains allowed to bypass the proxy.

    network_service
        The network service to apply the changes to; this is only necessary on
        macOS.

    CLI Example:

    .. code-block:: bash

    ... | servers_str = ' '.join(domains)
cmd = 'networksetup -setproxybypassdomains {0} {1}'.format(network_service, servers_str)
out = __salt__['cmd.run'](cmd)
return 'error' not in out |
def clean(self):
    """Checks for the identification and password.

    If the combination can't be found, will raise an invalid sign in error.""" | identification = self.cleaned_data.get('identification')
password = self.cleaned_data.get('password')
if identification and password:
    self.user_cache = authenticate(identification=identification, password=password)
    if self.user_cache is None:
        raise forms.ValidationError(_... |
def ppo_original_world_model_stochastic_discrete():
    """Atari parameters with stochastic discrete world model as policy.""" | hparams = ppo_original_params()
hparams.policy_network = "next_frame_basic_stochastic_discrete"
hparams_keys = hparams.values().keys()
video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
for (name, value) in six.iteritems(video_hparams.values()):
    if name in hparams_k... |
def dtime(sdat, tstart=None, tend=None):
    """Time increment dt.

    Compute dt as a function of time.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        tstart (float): time at which the computation should start. Use the
            beginning of the time series data... | tseries = sdat.tseries_between(tstart, tend)
time = tseries['t'].values
return time[1:] - time[:-1], time[:-1] |
def _iter_full_paths(path_list):
    """Iterates over all paths that are in a directory and its subdirectories, returning
    fully-specified paths.""" | for path in path_list:
    if not os.path.isdir(path):
        full_path = os.path.realpath(path)
        yield full_path
    else:
        for root, dirs, filenames in os.walk(path):
            for filename in filenames:
                full_path = os.path.realpath(os.path.join(root,... |
def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

    :param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
    :param p... | attrs = {'auto_close_field': publication_date_field, 'auto_moderate_field': publication_date_field, 'enable_field': enable_comments_field}
ModerationClass = type(ParentModel.__name__ + 'Moderator', (FluentCommentsModerator,), attrs)
moderator.register(ParentModel, ModerationClass) |
def create_service(self, name, **kwargs):
    """Creates a service with a name. All other parameters are optional. They
    are: `note`, `hourly_rate`, `billable`, and `archived`.""" | data = self._wrap_dict("service", kwargs)
data["customer"]["name"] = name
return self.post("/services.json", data=data) |
def Kdiag(self, X, target):
    """Compute the diagonal of the covariance matrix for X.""" | np.add(target, self.variance, target) |
def get_winfunc(libname, funcname, restype=None, argtypes=(), _libcache={}):
    """Retrieve a function from a library/DLL, and set the data types.""" | if libname not in _libcache:
    _libcache[libname] = windll.LoadLibrary(libname)
func = getattr(_libcache[libname], funcname)
func.argtypes = argtypes
func.restype = restype
return func |
def _combine_coverages ( items , work_dir , input_backs = None ) :
"""Combine coverage cnns calculated for individual inputs into single file .
Optionally moves over pre - calculated coverage samples from a background file .""" | out_file = os . path . join ( work_dir , "sample_coverages.txt" )
if not utils . file_exists ( out_file ) :
with file_transaction ( items [ 0 ] , out_file ) as tx_out_file :
with open ( tx_out_file , 'w' ) as out_f :
for data in items :
cov_file = tz . get_in ( [ "depth" , "bins"... |
def validate ( ctx , mapfiles , expand ) :
"""Validate Mapfile ( s ) against the Mapfile schema
The MAPFILES argument is a list of paths , either to individual Mapfiles , or a folders containing Mapfiles .
Wildcards are supported ( natively on Linux , and up to one level deep on Windows ) .
Validation errors ... | all_mapfiles = get_mapfiles ( mapfiles )
if len ( all_mapfiles ) == 0 :
click . echo ( "No Mapfiles found at the following paths: {}" . format ( "," . join ( mapfiles ) ) )
return
validation_count = 0
errors = 0
for fn in all_mapfiles :
fn = click . format_filename ( fn )
d = mappyfile . open ( fn , exp... |
def _transform(self, data, transform, step_size):
    '''Transform the data. If the transform is not supported by this series,
    returns the data unaltered.''' | if transform == 'mean':
    total = sum(data)
    count = len(data)
    data = float(total) / float(count) if count > 0 else 0
elif transform == 'count':
    data = len(data)
elif transform == 'min':
    data = min(data or [0])
elif transform == 'max':
    data = max(data or [0])
elif trans... |
def sense_ttf(self, target):
    """Sense for a Type F Target is supported for 212 and 424 kbps.""" | log.debug("polling for NFC-F technology")
if target.brty not in ("212F", "424F"):
    message = "unsupported bitrate {0}".format(target.brty)
    raise nfc.clf.UnsupportedTargetError(message)
self.chipset.in_set_rf(target.brty)
self.chipset.in_set_protocol(self.chipset.in_set... |
def generate_epochs_info ( epoch_list ) :
"""use epoch _ list to generate epoch _ info defined below
Parameters
epoch _ list : list of 3D ( binary ) array in shape [ condition , nEpochs , nTRs ]
Contains specification of epochs and conditions , assuming
1 . all subjects have the same number of epochs ;
2 ... | time1 = time . time ( )
epoch_info = [ ]
for sid , epoch in enumerate ( epoch_list ) :
for cond in range ( epoch . shape [ 0 ] ) :
sub_epoch = epoch [ cond , : , : ]
for eid in range ( epoch . shape [ 1 ] ) :
r = np . sum ( sub_epoch [ eid , : ] )
if r > 0 : # there is an epo... |
def get_user(self, user):
    """Get user's data (first and last name, email, etc).

    Args:
        user (string): User name.
    Returns:
        (dictionary): User's data encoded in a dictionary.
    Raises:
        requests.HTTPError on failure.""" | self.project_service.set_auth(self._token_project)
return self.project_service.get_user(user) |
def create(cls, **kwargs):
    """Build and return a `ScatterGather` object""" | linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default)
# Don't use setdefault b/c we don't want to build a JobArchive
# unless it is needed
job_archive = kwargs.get('job_archive', None)
if job_archive is None:
    job_archive = JobArchive.build_temp_job_archive()
kwarg... |
def load(self, filename):
    '''load points from a file.

    returns number of points loaded''' | f = open(filename, mode='r')
self.clear()
for line in f:
    if line.startswith('#'):
        continue
    line = line.strip()
    if not line:
        continue
    a = line.split()
    if len(a) != 2:
        raise MAVFenceError("invalid fence point line: %s" % line)
    self.add_lat... |
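The loader above is terse, so here is a minimal standalone sketch of the same parsing rules (comment lines starting with `#` and blank lines are skipped, every remaining line must hold exactly two whitespace-separated fields). The sample coordinates and the `MAVFenceError` stand-in are illustrative assumptions, not the library's own objects:

```python
# Minimal standalone sketch of the parsing rules used by load() above.
import io

class MAVFenceError(Exception):
    """Stand-in for the real exception type used by the loader."""

sample = io.StringIO(
    "# fence points: lat lon\n"
    "\n"
    "-35.3632621 149.1652374\n"
    "-35.3640000 149.1660000\n"
)

points = []
for line in sample:
    if line.startswith('#'):      # comment lines are skipped
        continue
    line = line.strip()
    if not line:                  # blank lines are skipped
        continue
    a = line.split()
    if len(a) != 2:               # each point must be exactly "lat lon"
        raise MAVFenceError("invalid fence point line: %s" % line)
    points.append((float(a[0]), float(a[1])))

print(points)  # [(-35.3632621, 149.1652374), (-35.364, 149.166)]
```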
def insert_before ( self , obj , value , recursive = True ) :
"""Insert * value * immediately before * obj * .
* obj * can be either a string , a : class : ` . Node ` , or another
: class : ` . Wikicode ` object ( as created by : meth : ` get _ sections ` , for
example ) . If * obj * is a string , we will ope... | if isinstance ( obj , ( Node , Wikicode ) ) :
context , index = self . _do_strong_search ( obj , recursive )
context . insert ( index . start , value )
else :
for exact , context , index in self . _do_weak_search ( obj , recursive ) :
if exact :
context . insert ( index . start , value )... |
def filter_args_to_dict(filter_dict, accepted_filter_keys=[]):
    """Cast and validate filter args.

    :param filter_dict: Filter kwargs
    :param accepted_filter_keys: List of keys that are acceptable to use.""" | out_dict = {}
for k, v in filter_dict.items():
    # make sure that the filter k is acceptable
    # and that there is a value associated with the key
    if k not in accepted_filter_keys or v is None:
        logger.debug('Filter was not in accepted_filter_keys or value is None.')
        # skip it
        contin... |
def gen_radio_edit ( sig_dic ) :
'''editing for HTML radio control .''' | edit_zuoxiang = '''7
<label for="{0}"><span>
<a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
</a>{1}</span>
''' . format ( sig_dic [ 'en' ] , sig_dic [ 'zh' ] )
dic_tmp = sig_dic [ 'dic' ]
for key in dic_tmp . keys ( ) :
tmp_str = '''
<input id="{0}" name="{0}" ... |
def fog ( x , severity = 1 ) :
"""Fog corruption to images .
Adding fog to images . Fog is generated by diamond - square algorithm .
Args :
x : numpy array , uncorrupted image , assumed to have uint8 pixel in [ 0,255 ] .
severity : integer , severity of corruption .
Returns :
numpy array , image with ui... | c = [ ( 1.5 , 2 ) , ( 2. , 2 ) , ( 2.5 , 1.7 ) , ( 2.5 , 1.5 ) , ( 3. , 1.4 ) ] [ severity - 1 ]
x = np . array ( x ) / 255.
max_val = x . max ( )
mapsize = 512
shape = x . shape
max_length = max ( shape [ 0 ] , shape [ 1 ] )
if max_length > mapsize :
mapsize = 2 ** int ( np . ceil ( np . log2 ( float ( max_length ... |
def fetchThreadMessages ( self , thread_id = None , limit = 20 , before = None ) :
"""Get the last messages in a thread
: param thread _ id : User / Group ID to get messages from . See : ref : ` intro _ threads `
: param limit : Max . number of messages to retrieve
: param before : A timestamp , indicating fr... | thread_id , thread_type = self . _getThread ( thread_id , None )
params = { "id" : thread_id , "message_limit" : limit , "load_messages" : True , "load_read_receipts" : True , "before" : before , }
j = self . graphql_request ( GraphQL ( doc_id = "1860982147341344" , params = params ) )
if j . get ( "message_thread" ) i... |
def remove_column(self, key):
    """:param key: str of the column to remove from every row in the table
    :return: None""" | if isinstance(key, int):
    index = key
    key = self.row_columns[key]
else:
    index = self._column_index[key]
for row in self.table:
    row.pop(index)
self.row_columns = self.row_columns[:index] + self.row_columns[index + 1:]
self.pop_column(key) |
def enkf ( self ) :
"""Loop over time windows and apply da
: return :""" | for cycle_index , time_point in enumerate ( self . timeline ) :
if cycle_index >= len ( self . timeline ) - 1 : # Logging : Last Update cycle has finished
break
print ( "Print information about this assimilation Cycle ???" )
# should be handeled in Logger
# each cycle should have a dictionary of... |
def _wait_for_machine_finish(self, name):
    """Internal method.

    Wait until the machine is really destroyed and no longer exists.

    :param name: str machine name
    :return: True or exception""" | # TODO: rewrite it using probes module in utils
for foo in range(constants.DEFAULT_RETRYTIMEOUT):
    time.sleep(constants.DEFAULT_SLEEP)
    out = run_cmd(["machinectl", "--no-pager", "status", name], ignore_status=True, return_output=True)
    if out != 0:
        return True
raise ConuEx... |
def is_magic(self):
    """Return True iff this method is a magic method (e.g., `__str__`).""" | return (self.name.startswith('__') and self.name.endswith('__') and self.name not in VARIADIC_MAGIC_METHODS) |
def check_permission(self, request, page, permission):
    """Runs the custom permission check and raises an
    exception if False.""" | if not getattr(page, "can_" + permission)(request):
    raise PermissionDenied |
def _get_primary_type(ttypes, parent, logstream=stderr):
    """Check for multiple transcript types and, if possible, select one.""" | if len(ttypes) > 1:
    if logstream:  # pragma: no branch
        message = '[tag::transcript::primary_transcript]'
        message += ' WARNING: feature {:s}'.format(parent.slug)
        message += ' has multiple associated transcript types'
        message += ' {}'.format(ttypes)
        print(mes... |
def trace ( self , * attributes ) :
"""Function decorator that traces functions
NOTE : Must be placed after the @ app . route decorator
@ param attributes any number of flask . Request attributes
( strings ) to be set as tags on the created span""" | def decorator ( f ) :
def wrapper ( * args , ** kwargs ) :
if self . _trace_all_requests :
return f ( * args , ** kwargs )
self . _before_request_fn ( list ( attributes ) )
try :
r = f ( * args , ** kwargs )
self . _after_request_fn ( )
except Exce... |
def example_df():
    """Create an example dataframe.""" | country_names = ['Germany', 'France', 'Indonesia', 'Ireland', 'Spain', 'Vatican']
population = [82521653, 66991000, 255461700, 4761865, 46549045, None]
population_time = [dt.datetime(2016, 12, 1), dt.datetime(2017, 1, 1), dt.datetime(2017, 1, 1), None,  # Ireland
                   dt.datetime(2... |
def verify ( self , ** kwargs ) :
"""Authorization Request parameters that are OPTIONAL in the OAuth 2.0
specification MAY be included in the OpenID Request Object without also
passing them as OAuth 2.0 Authorization Request parameters , with one
exception : The scope parameter MUST always be present in OAuth... | super ( AuthorizationRequest , self ) . verify ( ** kwargs )
clear_verified_claims ( self )
args = { }
for arg in [ "keyjar" , "opponent_id" , "sender" , "alg" , "encalg" , "encenc" ] :
try :
args [ arg ] = kwargs [ arg ]
except KeyError :
pass
if "opponent_id" not in kwargs :
args [ "oppone... |
def _get_state():
    '''Returns the state of connman''' | try:
    return pyconnman.ConnManager().get_property('State')
except KeyError:
    return 'offline'
except dbus.DBusException as exc:
    raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) |
def _get_and_write_fp ( self , iso_path , outfp , blocksize ) : # type : ( bytes , BinaryIO , int ) - > None
'''An internal method to fetch a single file from the ISO and write it out
to the file object .
Parameters :
iso _ path - The absolute path to the file to get data from .
outfp - The file object to w... | try :
return self . _get_file_from_iso_fp ( outfp , blocksize , None , None , iso_path )
except pycdlibexception . PyCdlibException :
pass
try :
return self . _get_file_from_iso_fp ( outfp , blocksize , iso_path , None , None )
except pycdlibexception . PyCdlibException :
pass
self . _get_file_from_iso_... |
def _pre_job_handling ( self , job ) :
"""Some code executed before actually processing the job .
: param VFGJob job : the VFGJob object .
: return : None""" | # did we reach the final address ?
if self . _final_address is not None and job . addr == self . _final_address : # our analysis should be termianted here
l . debug ( "%s is viewed as a final state. Skip." , job )
raise AngrSkipJobNotice ( )
l . debug ( "Handling VFGJob %s" , job )
if not self . _top_task :
... |
def save(self, update_site=False, *args, **kwargs):
    """Set the site to the current site when the record is first
    created, or the ``update_site`` argument is explicitly set
    to ``True``.""" | if update_site or (self.id is None and self.site_id is None):
    self.site_id = current_site_id()
super(SiteRelated, self).save(*args, **kwargs) |
def connections_from_graph ( env , G , edge_data = False ) :
"""Create connections for agents in the given environment from the given
NetworkX graph structure .
: param env :
Environment where the agents live . The environment should be derived
from : class : ` ~ creamas . core . environment . Environment `... | if not issubclass ( G . __class__ , ( Graph , DiGraph ) ) :
raise TypeError ( "Graph structure must be derived from Networkx's " "Graph or DiGraph." )
if not hasattr ( env , 'get_agents' ) :
raise TypeError ( "Parameter 'env' must have get_agents." )
addrs = env . get_agents ( addr = True )
if len ( addrs ) != ... |
def comment_filter(comment_text):
    """Passed comment text to be rendered through the function defined
    by the ``COMMENT_FILTER`` setting. If no function is defined
    (the default), Django's ``linebreaksbr`` and ``urlize`` filters
    are used.""" | filter_func = settings.COMMENT_FILTER
if not filter_func:
    def filter_func(s):
        return linebreaksbr(urlize(s, autoescape=True), autoescape=True)
elif not callable(filter_func):
    filter_func = import_dotted_path(filter_func)
return filter_func(comment_text) |
def saved ( name , source = 'running' , user = None , group = None , mode = None , attrs = None , makedirs = False , dir_mode = None , replace = True , backup = '' , show_changes = True , create = True , tmp_dir = '' , tmp_ext = '' , encoding = None , encoding_errors = 'strict' , allow_empty = False , follow_symlinks =... | ret = __salt__ [ 'net.config' ] ( source = source )
if not ret [ 'result' ] :
return { 'name' : name , 'changes' : { } , 'result' : False , 'comment' : ret [ 'comment' ] }
return __states__ [ 'file.managed' ] ( name , user = user , group = group , mode = mode , attrs = attrs , makedirs = makedirs , dir_mode = dir_m... |
def encode_chunk(dataframe):
    """Return a file-like object of CSV-encoded rows.

    Args:
        dataframe (pandas.DataFrame): A chunk of a dataframe to encode""" | csv_buffer = six.StringIO()
dataframe.to_csv(csv_buffer, index=False, header=False, encoding="utf-8", float_format="%.15g", date_format="%Y-%m-%d %H:%M:%S.%f")
# Convert to a BytesIO buffer so that unicode text is properly handled.
# See: https://github.com/pydata/pandas-gbq/is... |
def to_dict(self):
    """Converts the set of parameters into a dict""" | return dict((parameter.name, parameter.value) for parameter in self.values()) |
def by_group(self):  # pragma: no cover
    """Display group membership sorted by group.

    Returns:
        Array with a dictionary of group membership.
        For example: {'testgroup': ['test.user', 'test.user2']}""" | group_membership = {}
for record in self.__get_groups_with_membership():
    group_membership[record.cn.value] = [i for i in record.memberUid.values]
return group_membership |
def cycle_string ( key_arn , source_plaintext , botocore_session = None ) :
"""Encrypts and then decrypts a string under a KMS customer master key ( CMK ) .
: param str key _ arn : Amazon Resource Name ( ARN ) of the KMS CMK
: param bytes source _ plaintext : Data to encrypt
: param botocore _ session : exist... | # Create a KMS master key provider
kms_kwargs = dict ( key_ids = [ key_arn ] )
if botocore_session is not None :
kms_kwargs [ "botocore_session" ] = botocore_session
master_key_provider = aws_encryption_sdk . KMSMasterKeyProvider ( ** kms_kwargs )
# Encrypt the plaintext source data
ciphertext , encryptor_header = ... |
def run(connection):
    """Ensure that we have snapshots for a given volume

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :returns: None""" | volumes = volume_manager.get_watched_volumes(connection)
for volume in volumes:
    _ensure_snapshot(connection, volume)
    _remove_old_snapshots(connection, volume) |
def results(self):
    """Print results""" | print("")
per = int(round((float(self.cf) / (self.cf + self.cn)) * 100))
if per > 90:
    color = self.meta.color["GREEN"]
elif 60 < per <= 90:
    color = self.meta.color["YELLOW"]
else:
    color = self.meta.color["RED"]
health = "{0}{1}%{2}".format(co... |
def get_what_txt(self):
    """Overrides the base behaviour defined in ValidationError in order to add details about the class field.

    :return:""" | return 'field [{field}] for class [{clazz}]'.format(field=self.get_variable_str(), clazz=self.validator.get_validated_class_display_name()) |
def host_diskpool_get_info ( self , disk_pool = None ) :
"""Retrieve diskpool information .
: param str disk _ pool : the disk pool info . It use ' : ' to separate
disk pool type and pool name , eg " ECKD : eckdpool " or " FBA : fbapool "
: returns : Dictionary describing disk pool usage info""" | # disk _ pool must be assigned . disk _ pool default to None because
# it is more convenient for users to just type function name when
# they want to get the disk pool info of CONF . zvm . disk _ pool
if disk_pool is None :
disk_pool = CONF . zvm . disk_pool
if ':' not in disk_pool :
msg = ( 'Invalid input para... |
def _hash_to_sh_address(script_hash, witness=False, cashaddr=True):
    '''bytes, bool, bool -> str

    cashaddrs are preferred where possible
    but cashaddr is ignored in most cases
    is there a better way to structure this?''' | addr_bytes = bytearray()
if riemann.network.CASHADDR_P2SH is not None and cashaddr:
    addr_bytes.extend(riemann.network.CASHADDR_P2SH)
    addr_bytes.extend(script_hash)
    return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
if witness:
    addr_bytes.extend(riemann.networ... |
def check_stat ( self , path ) :
"""Checks logfile stat information for excluding files not in datetime period .
On Linux it ' s possible to checks only modification time , because file creation info
are not available , so it ' s possible to exclude only older files .
In Unix BSD systems and windows informati... | statinfo = os . stat ( path )
st_mtime = datetime . fromtimestamp ( statinfo . st_mtime )
if platform . system ( ) == 'Linux' :
check = st_mtime >= self . start_dt
else :
st_ctime = datetime . fromtimestamp ( statinfo . st_ctime )
check = st_mtime >= self . start_dt and st_ctime <= self . end_dt
if not chec... |
def setDocuments(self, documenting_pid, documented_pid):
    """Add a CiTO, the Citation Typing Ontology, triple asserting that
    ``documenting_pid`` documents ``documented_pid``.

    Adds assertion: ``documenting_pid cito:documents documented_pid``

    Args:
        documenting_pid: str
            PID... | self._check_initialized()
documenting_id = self.getObjectByPid(documenting_pid)
documented_id = self.getObjectByPid(documented_pid)
self.add((documenting_id, CITO.documents, documented_id)) |
def split_shard(self, project_name, logstore_name, shardId, split_hash):
    """split a readwrite shard into two shards

    Unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name:... | headers = {}
params = {"action": "split", "key": split_hash}
resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
(resp, header) = self._send("POST", project_name, None, resource, params, headers)
return ListShardResponse(resp, header) |
def vector_generate ( start_pt , end_pt , normalize = False ) :
"""Generates a vector from 2 input points .
: param start _ pt : start point of the vector
: type start _ pt : list , tuple
: param end _ pt : end point of the vector
: type end _ pt : list , tuple
: param normalize : if True , the generated ... | try :
if start_pt is None or len ( start_pt ) == 0 or end_pt is None or len ( end_pt ) == 0 :
raise ValueError ( "Input points cannot be empty" )
except TypeError as e :
print ( "An error occurred: {}" . format ( e . args [ - 1 ] ) )
raise TypeError ( "Input must be a list or tuple" )
except Excepti... |
def xgboost_installed():
    """Checks that *xgboost* is available.""" | try:
    import xgboost
except ImportError:
    return False
from xgboost.core import _LIB
try:
    _LIB.XGBoosterDumpModelEx
except AttributeError:
    # The version is not recent enough even though it is version 0.6.
    # You need to install xgboost from github and not from pypi.
    return False
from xgboost impor... |
def get_baremetal_physnet ( self , context ) :
"""Returns dictionary which contains mac to hostname mapping""" | port = context . current
host_id = context . host
cmd = [ 'show network physical-topology hosts' ]
try :
response = self . _run_eos_cmds ( cmd )
binding_profile = port . get ( portbindings . PROFILE , { } )
link_info = binding_profile . get ( 'local_link_information' , [ ] )
for link in link_info :
... |
def currentText(self):
    """Returns the text that is available currently, if the user has set standard text, then that is returned, otherwise the hint is returned.

    :return <str>""" | text = nativestring(self.text())
if text or not self.useHintValue():
    return text
return self.hint() |
def write_input ( self , output_dir , make_dir_if_not_present = True , write_cif = False , write_path_cif = False , write_endpoint_inputs = False ) :
"""NEB inputs has a special directory structure where inputs are in 00,
01 , 02 , . . . .
Args :
output _ dir ( str ) : Directory to output the VASP input files... | output_dir = Path ( output_dir )
if make_dir_if_not_present and not output_dir . exists ( ) :
output_dir . mkdir ( parents = True )
self . incar . write_file ( str ( output_dir / 'INCAR' ) )
self . kpoints . write_file ( str ( output_dir / 'KPOINTS' ) )
self . potcar . write_file ( str ( output_dir / 'POTCAR' ) )
f... |
def sent_tokenize(self, text, **kwargs):
    """Returns a list of sentences.

    Each sentence is a space-separated string of tokens (words).
    Handles common cases of abbreviations (e.g., etc., ...).
    Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. ... | sentences = find_sentences(text, punctuation=kwargs.get("punctuation", PUNCTUATION), abbreviations=kwargs.get("abbreviations", ABBREVIATIONS_DE), replace=kwargs.get("replace", replacements), linebreak=r"\n{2,}")
return sentences |
def get_fields(model, fields=None):
    """Assigns fields for model.""" | include = [f.strip() for f in fields.split(',')] if fields else None
return utils.get_fields(model, include) |
def RS ( S , second_pass = False ) :
"""Compute a C / F splitting using Ruge - Stuben coarsening
Parameters
S : csr _ matrix
Strength of connection matrix indicating the strength between nodes i
and j ( S _ ij )
second _ pass : bool , default False
Perform second pass of classical AMG coarsening . Can b... | if not isspmatrix_csr ( S ) :
raise TypeError ( 'expected csr_matrix' )
S = remove_diagonal ( S )
T = S . T . tocsr ( )
# transpose S for efficient column access
splitting = np . empty ( S . shape [ 0 ] , dtype = 'intc' )
influence = np . zeros ( ( S . shape [ 0 ] , ) , dtype = 'intc' )
amg_core . rs_cf_splitting (... |
def get_user(self, identified_with, identifier, req, resp, resource, uri_kwargs):
    """Return default user object.""" | return self.user |
def _db_install(self, db_name):
    """Install nipap database schema""" | self._logger.info("Installing NIPAP database schemas into db")
self._execute(db_schema.ip_net % (db_name))
self._execute(db_schema.functions)
self._execute(db_schema.triggers) |
def submit_msql_object_query ( object_query , client = None ) :
"""Submit ` object _ query ` to MemberSuite , returning
. models . MemberSuiteObjects .
So this is a converter from MSQL to . models . MemberSuiteObjects .
Returns query results as a list of MemberSuiteObjects .""" | client = client or get_new_client ( )
if not client . session_id :
client . request_session ( )
result = client . execute_object_query ( object_query )
execute_msql_result = result [ "body" ] [ "ExecuteMSQLResult" ]
membersuite_object_list = [ ]
if execute_msql_result [ "Success" ] :
result_value = execute_msql... |
def _apicall(self, method, **params):
    """Call an API method and return response data. For more info, see:
    https://ccp.netcup.net/run/webservice/servers/endpoint""" | LOGGER.debug('%s(%r)', method, params)
auth = {'customernumber': self._get_provider_option('auth_customer_id'), 'apikey': self._get_provider_option('auth_api_key')}
if method == 'login':
    auth['apipassword'] = self._get_provider_option('auth_api_password')
else:
    auth['apisessi... |
def delete_persistent_data(role, zk_node):
    """Deletes any persistent data associated with the specified role, and zk node.

    :param role: the mesos role to delete, or None to omit this
    :type role: str
    :param zk_node: the zookeeper node to be deleted, or None to skip this deletion
    :type zk_... | if role:
    destroy_volumes(role)
    unreserve_resources(role)
if zk_node:
    delete_zk_node(zk_node) |
def snake_case_backend_name(self):
    """CamelCase -> camel_case""" | s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', type(self).__name__)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() |
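The two regular expressions above are easy to misread, so here is a minimal standalone sketch of the same CamelCase-to-snake_case conversion applied to plain strings; the helper name and sample inputs are illustrative assumptions:

```python
import re

def snake_case(name):
    # First pass: split before a capital that starts a new word ("HTTPServer" -> "HTTP_Server").
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # Second pass: split between a lowercase/digit and the following capital, then lowercase everything.
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(snake_case("CamelCase"))        # camel_case
print(snake_case("S3BackendDriver"))  # s3_backend_driver
```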
def find_resource(r, *, pkg='cyther'):
    """Finds a given cyther resource in the 'test' subdirectory of the
    'cyther' package""" | file_path = pkg_resources.resource_filename(pkg, os.path.join('test', r))
if not os.path.isfile(file_path):
    msg = "Resource '{}' does not exist"
    raise FileNotFoundError(msg.format(file_path))
return file_path |
def _add_ticks ( ax : Axes , h1 : Histogram1D , kwargs : dict ) :
"""Customize ticks for an axis ( 1D histogram ) .
Parameters
ticks : { " center " , " edge " } , optional
Position of the ticks
tick _ handler : Callable [ [ Histogram1D , float , float ] , Tuple [ List [ float ] , List [ str ] ] ]""" | ticks = kwargs . pop ( "ticks" , None )
tick_handler = kwargs . pop ( "tick_handler" , None )
if tick_handler :
if ticks :
raise ValueError ( "Cannot specify both tick and tick_handler" )
ticks , labels = tick_handler ( h1 , * ax . get_xlim ( ) )
ax . set_xticks ( ticks )
ax . set_xticklabels ( ... |
def _read_para_hip_transport_mode ( self , code , cbit , clen , * , desc , length , version ) :
"""Read HIP HIP _ TRANSPORT _ MODE parameter .
Structure of HIP HIP _ TRANSPORT _ MODE parameter [ RFC 6261 ] :
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
| Type | Length |
| Port |... | if clen % 2 != 0 :
raise ProtocolError ( f'HIPv{version}: [Parano {code}] invalid format' )
_port = self . _read_unpack ( 2 )
_mdid = list ( )
for _ in range ( ( clen - 2 ) // 2 ) :
_mdid . append ( _TP_MODE_ID . get ( self . _read_unpack ( 2 ) , 'Unassigned' ) )
hip_transport_mode = dict ( type = desc , critic... |
def get_minions():
    '''Return a list of minions''' | log.debug('sdstack_etcd returner <get_minions> called')
ret = []
client, path = _get_conn(__opts__)
items = client.get('/'.join((path, 'minions')))
for item in items.children:
    comps = str(item.key).split('/')
    ret.append(comps[-1])
return ret |
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None):
    """Alignment with bowtie2.""" | config = data["config"]
analysis_config = ANALYSIS.get(data["analysis"].lower())
assert analysis_config, "Analysis %s is not supported by bowtie2" % (data["analysis"])
out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
if data.get("align_split"... |
def _GetElementDataTypeDefinition ( self , data_type_definition ) :
"""Retrieves the element data type definition .
Args :
data _ type _ definition ( DataTypeDefinition ) : data type definition .
Returns :
DataTypeDefinition : element data type definition .
Raises :
FormatError : if the element data typ... | if not data_type_definition :
raise errors . FormatError ( 'Missing data type definition' )
element_data_type_definition = getattr ( data_type_definition , 'element_data_type_definition' , None )
if not element_data_type_definition :
raise errors . FormatError ( 'Invalid data type definition missing element' )
... |
def refresh_committed_offsets_if_needed(self):
    """Fetch committed offsets for assigned partitions.""" | if self._subscription.needs_fetch_committed_offsets:
    offsets = self.fetch_committed_offsets(self._subscription.assigned_partitions())
    for partition, offset in six.iteritems(offsets):
        # verify assignment is still active
        if self._subscription.is_assigned(partition):
            ... |
def _get_approved_attributes ( self , idp , idp_policy , sp_entity_id , state ) :
"""Returns a list of approved attributes
: type idp : saml . server . Server
: type idp _ policy : saml2 . assertion . Policy
: type sp _ entity _ id : str
: type state : satosa . state . State
: rtype : list [ str ]
: par... | name_format = idp_policy . get_name_form ( sp_entity_id )
attrconvs = idp . config . attribute_converters
idp_policy . acs = attrconvs
attribute_filter = [ ]
for aconv in attrconvs :
if aconv . name_format == name_format :
all_attributes = { v : None for v in aconv . _fro . values ( ) }
attribute_fi... |
def is_valid_package_name(name, raise_error=False):
    """Test the validity of a package name string.

    Args:
        name (str): Name to test.
        raise_error (bool): If True, raise an exception on failure
    Returns:
        bool.""" | is_valid = PACKAGE_NAME_REGEX.match(name)
if raise_error and not is_valid:
    raise PackageRequestError("Not a valid package name: %r" % name)
return is_valid |
def update_result_ctrl ( self , event ) :
"""Update event result following execution by main window""" | # Check to see if macro window still exists
if not self :
return
printLen = 0
self . result_ctrl . SetValue ( '' )
if hasattr ( event , 'msg' ) : # Output of script ( from print statements , for example )
self . result_ctrl . AppendText ( event . msg )
printLen = len ( event . msg )
if hasattr ( event , 'er... |
def install ( config_file , store , overwrite = False , hooks = False , hook_type = 'pre-commit' , skip_on_missing_conf = False , ) :
"""Install the pre - commit hooks .""" | if cmd_output ( 'git' , 'config' , 'core.hooksPath' , retcode = None ) [ 1 ] . strip ( ) :
logger . error ( 'Cowardly refusing to install hooks with `core.hooksPath` set.\n' 'hint: `git config --unset-all core.hooksPath`' , )
return 1
hook_path , legacy_path = _hook_paths ( hook_type )
mkdirp ( os . path . dirn... |
def ecef2enuv(u: float, v: float, w: float, lat0: float, lon0: float, deg: bool = True) -> Tuple[float, float, float]:
    """VECTOR from observer to target: ECEF => ENU

    Parameters
    u : float or numpy.ndarray of float
        target x ECEF coordinate (meters)
    v : float or numpy.ndarray of float... | if deg:
    lat0 = radians(lat0)
    lon0 = radians(lon0)
t = cos(lon0) * u + sin(lon0) * v
uEast = -sin(lon0) * u + cos(lon0) * v
wUp = cos(lat0) * t + sin(lat0) * w
vNorth = -sin(lat0) * t + cos(lat0) * w
return uEast, vNorth, wUp |
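To make the rotation above easier to sanity-check, here is a minimal standalone sketch of the same formulas (not the library's own API) evaluated at an observer at latitude 0, longitude 0, where the ECEF axes map directly onto (up, east, north); the numeric inputs are illustrative:

```python
from math import radians, sin, cos

def ecef_vector_to_enu(u, v, w, lat0_deg, lon0_deg):
    # Same two-step rotation as ecef2enuv above: first about the z-axis by
    # longitude, then about the east axis by latitude.
    lat0, lon0 = radians(lat0_deg), radians(lon0_deg)
    t = cos(lon0) * u + sin(lon0) * v
    east = -sin(lon0) * u + cos(lon0) * v
    up = cos(lat0) * t + sin(lat0) * w
    north = -sin(lat0) * t + cos(lat0) * w
    return east, north, up

# At lat0 = lon0 = 0, ECEF (x, y, z) corresponds to (up, east, north):
print(ecef_vector_to_enu(1.0, 2.0, 3.0, 0.0, 0.0))  # (2.0, 3.0, 1.0)
```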
def geneways_action_to_indra_statement_type ( actiontype , plo ) :
"""Return INDRA Statement corresponding to Geneways action type .
Parameters
actiontype : str
The verb extracted by the Geneways processor
plo : str
A one character string designating whether Geneways classifies
this verb as a physical ,... | actiontype = actiontype . lower ( )
statement_generator = None
is_direct = ( plo == 'P' )
if actiontype == 'bind' :
statement_generator = lambda substance1 , substance2 , evidence : Complex ( [ substance1 , substance2 ] , evidence = evidence )
is_direct = True
elif actiontype == 'phosphorylate' :
statement_... |
def sort_idxs(cls, similarities):
    "Sorts `similarities` and return the indexes in pairs ordered by highest similarity." | idxs = cls.largest_indices(similarities, len(similarities))
idxs = [(idxs[0][i], idxs[1][i]) for i in range(len(idxs[0]))]
return [e for l in idxs for e in l] |
def get_roles(self):
    """Get the roles associated with the hosts.

    Returns
        dict of role -> [host]""" | machines = self.c_resources["machines"]
result = {}
for desc in machines:
    roles = utils.get_roles_as_list(desc)
    hosts = self._denormalize(desc)
    for role in roles:
        result.setdefault(role, [])
        result[role].extend(hosts)
return result |
def disable_paging(self, command="no pager", delay_factor=1):
    """Disable paging""" | return super(QuantaMeshSSH, self).disable_paging(command=command) |
def compute_between_collection_interval_duration ( self , prefix ) :
"""Calculates BETWEEN - collection intervals for the current collection and measure type
and takes their mean .
: param str prefix : Prefix for the key entry in self . measures .
Negative intervals ( for overlapping clusters ) are counted as... | durations = [ ]
# duration of each collection
for collection in self . collection_list : # Entry , with timing , in timed _ response for first word in collection
start = collection [ 0 ] . start_time
# Entry , with timing , in timed _ response for last word in collection
end = collection [ - 1 ] . end_time
... |
def str_encode ( value , encoder = 'base64' ) :
'''. . versionadded : : 2014.7.0
value
The value to be encoded .
encoder : base64
The encoder to use on the subsequent string .
CLI Example :
. . code - block : : bash
salt ' * ' random . str _ encode ' I am a new string ' base64''' | if six . PY2 :
try :
out = value . encode ( encoder )
except LookupError :
raise SaltInvocationError ( 'You must specify a valid encoder' )
except AttributeError :
raise SaltInvocationError ( 'Value must be an encode-able string' )
else :
if isinstance ( value , six . string_type... |
def is_protein_or_chemical(agent):
    '''Return True if the agent is a protein/protein family or chemical.''' | # Default is True if agent is None
if agent is None:
    return True
dbs = set(['UP', 'HGNC', 'CHEBI', 'PFAM-DEF', 'IP', 'INDRA', 'PUBCHEM', 'CHEMBL'])
agent_refs = set(agent.db_refs.keys())
if agent_refs.intersection(dbs):
    return True
return False |
def set_from_matrix44(self, mat):
    """Create a new Quat from a Matrix44.

    Note that the matrix and indexes are column major.""" | # Matrix trace
trace = mat.data[0][0] + mat.data[1][1] + mat.data[2][2] + 1.0
if trace > 0.00000001:
    # n4 is norm of quaternion multiplied by 4.
    n4 = math.sqrt(trace) * 2
    self.x = (mat.data[1][2] - mat.data[2][1]) / n4
    self.y = (mat.data[2][0] - m... |
def get_active_conditions ( self , manager ) :
'''Returns a generator which yields groups of lists of conditions .
> > > conditions = switch . get _ active _ conditions ( )
> > > for label , set _ id , field , value , exc in conditions : # doctest : + SKIP
> > > print ( " % ( label ) s : % ( field ) s = % ( v... | for condition_set in sorted ( manager . get_condition_sets ( ) , key = lambda x : x . get_group_label ( ) ) :
ns = condition_set . get_namespace ( )
condition_set_id = condition_set . get_id ( )
if ns in self . value :
group = condition_set . get_group_label ( )
for name , field in condition... |
def _set_flask_alembic():
    """Add the SQLAlchemy object in the global extension"""
    from flask_alembic import Alembic | application.app.extensions["sqlalchemy"] = type('', (), {"db": db})
alembic = Alembic()
alembic.init_app(application.app)
return alembic |
def search ( self , category = None , cuisine = None , location = ( None , None ) , radius = None , tl_coord = ( None , None ) , br_coord = ( None , None ) , name = None , country = None , locality = None , region = None , postal_code = None , street_address = None , website_url = None , has_menu = None , open_at = Non... | params = self . _get_params ( category = category , cuisine = cuisine , location = location , radius = radius , tl_coord = tl_coord , br_coord = br_coord , name = name , country = country , locality = locality , region = region , postal_code = postal_code , street_address = street_address , website_url = website_url , ... |
def plotE ( self , * args , ** kwargs ) :
"""NAME :
plotE
PURPOSE :
plot E ( . ) along the orbit
INPUT :
bovy _ plot . bovy _ plot inputs
OUTPUT :
figure to output device
HISTORY :
2014-06-16 - Written - Bovy ( IAS )""" | if kwargs . pop ( 'normed' , False ) :
kwargs [ 'd2' ] = 'Enorm'
else :
kwargs [ 'd2' ] = 'E'
return self . plot ( * args , ** kwargs ) |
def panels(self):
    """Add 2 panels to the figure, top for signal and bottom for gene models""" | ax1 = self.fig.add_subplot(211)
ax2 = self.fig.add_subplot(212, sharex=ax1)
return (ax2, self.gene_panel), (ax1, self.signal_panel) |
def column_stack_2d(data):
    """Perform column-stacking on a list of 2d data blocks.""" | return list(list(itt.chain.from_iterable(_)) for _ in zip(*data)) |
def to_pdf ( self , outFileName , imageFileName = None , showBoundingboxes = False , fontname = "Helvetica" , invisibleText = False , interwordSpaces = False , ) :
"""Creates a PDF file with an image superimposed on top of the text .
Text is positioned according to the bounding box of the lines in
the hOCR file... | # create the PDF file
# page size in points ( 1/72 in . )
pdf = Canvas ( outFileName , pagesize = ( self . width , self . height ) , pageCompression = 1 )
# draw bounding box for each paragraph
# light blue for bounding box of paragraph
pdf . setStrokeColorRGB ( 0 , 1 , 1 )
# light blue for bounding box of paragraph
pd... |
def draw_instances ( self , X , y , ** kwargs ) :
"""Draw the instances colored by the target y such that each line is a
single instance . This is the " slow " mode of drawing , since each
instance has to be drawn individually . However , in so doing , the
density of instances in braids is more apparent since... | # Get alpha from param or default
alpha = self . alpha or 0.25
for idx in range ( len ( X ) ) :
Xi = X [ idx ]
yi = y [ idx ]
# TODO : generalize this duplicated code into a single function
if isinstance ( yi , str ) :
label = yi
else : # TODO : what happens if yi is not in classes ? !
... |
def export_image3d ( input , output , size = ( 800 , 600 ) , pcb_rotate = ( 0 , 0 , 0 ) , timeout = 20 , showgui = False ) :
'''Exporting eagle . brd file into 3D image file
using Eagle3D and povray .
GUI is not displayed if ` ` pyvirtualdisplay ` ` is installed .
If export is blocked somehow ( e . g . popup ... | input = norm_path ( input )
output = norm_path ( output )
ext = os . path . splitext ( input ) [ 1 ]
if ext not in [ '.brd' ] :
raise ValueError ( 'Input extension is not ".brd", brd=' + str ( input ) )
commands = [ ]
eagle3d = Path ( __file__ ) . dirname ( ) / 'eagle3d'
ulp = ( eagle3d / '3d50.ulp' ) . abspath ( )... |
def plot_curvature ( self , curv_type = 'mean' , ** kwargs ) :
"""Plots the curvature of the external surface of the grid
Parameters
curv _ type : str , optional
One of the following strings indicating curvature types
- mean
- gaussian
- maximum
- minimum
* * kwargs : optional
Optional keyword arg... | trisurf = self . extract_surface ( ) . tri_filter ( )
return trisurf . plot_curvature ( curv_type , ** kwargs ) |
def chemicals ( self ) :
"""List of namedtuples representing chemical entities in the form
( source , chemical _ name , cas _ registry _ number ) . In case multiple
numbers given , they are joined on " ; " .""" | path = [ 'enhancement' , 'chemicalgroup' , 'chemicals' ]
items = listify ( chained_get ( self . _head , path , [ ] ) )
chemical = namedtuple ( 'Chemical' , 'source chemical_name cas_registry_number' )
out = [ ]
for item in items :
for chem in listify ( item [ 'chemical' ] ) :
number = chem . get ( 'cas-regi... |
def stream_url(self):
    '''stream for this song - not re-encoded''' | path = '/Audio/{}/universal'.format(self.id)
return self.connector.get_url(path, userId=self.connector.userid, MaxStreamingBitrate=140000000, Container='opus', TranscodingContainer='opus', AudioCodec='opus', MaxSampleRate=48000, PlaySessionId=1496213367201  # TODO no hard code
) |
def _split_python(python):
    """Split Python source into chunks.

    Chunks are separated by at least two return lines. The break must not
    be followed by a space. Also, long Python strings spanning several lines
    are not split.""" | python = _preprocess(python)
if not python:
    return []
lexer = PythonSplitLexer()
lexer.read(python)
return lexer.chunks |
def strftime(self, date_format):
    """Convert to Index using specified date_format.

    Return an Index of formatted strings specified by date_format, which
    supports the same string format as the python standard library. Details
    of the string format can be found in `python string format
    doc <%(URL... | from pandas import Index
return Index(self._format_native_types(date_format=date_format)) |
def cancel(self, username, project, build_num):
    """Cancel the build and return its summary.""" | method = 'POST'
url = ('/project/{username}/{project}/{build_num}/cancel?'
       'circle-token={token}'.format(username=username, project=project, build_num=build_num, token=self.client.api_token))
json_data = self.client.request(method, url)
return json_data |
def _get_I(self, a, b, size, plus_transpose=True):
    """Return I matrix in Chaput's PRL paper.

    None is returned if I is zero matrix.""" | r_sum = np.zeros((3, 3), dtype='double', order='C')
for r in self._rotations_cartesian:
    for i in range(3):
        for j in range(3):
            r_sum[i, j] += r[a, i] * r[b, j]
if plus_transpose:
    r_sum += r_sum.T
# Return None not to consume computer for diagonalization... |