id int64 0 10k | text stringlengths 186 4k | length int64 128 1.02k |
|---|---|---|
100 | import os
import kerinou.views
from flask import Flask
# Create the Flask app and load configuration: package defaults first,
# then overrides from the file named by the KERINOU_SETTINGS env var.
app = Flask(__name__)
app.config.from_object('kerinou.default_settings')
# NOTE(review): from_envvar raises RuntimeError when KERINOU_SETTINGS is
# unset — presumably intentional (fail fast); confirm, else pass silent=True.
app.config.from_envvar('KERINOU_SETTINGS')

# Outside debug mode, send WARNING-and-above records to a log file that
# rotates once a day at midnight.
if not app.debug:
    import logging
    from logging.handlers import TimedRotatingFileHandler
    # https://docs.python.org/3.6/library/logging.handlers.html#timedrotatingfilehandler
    logfile = os.path.join(app.config['LOG_DIR'], 'kerinou.log')
    file_handler = TimedRotatingFileHandler(logfile, 'midnight')
    file_handler.setLevel(logging.WARNING)
    logformat = logging.Formatter('<%(asctime)s> <%(levelname)s> %(message)s')
    file_handler.setFormatter(logformat)
    app.logger.addHandler(file_handler)
| 256 |
101 | from flask import Flask
from .view_classes import BasicView
from nose.tools import eq_
# Shared fixture: register BasicView under the "basic" subdomain of
# test.test and reuse one test client for every test in this module.
app = Flask("common")
app.config["SERVER_NAME"] = "test.test"
BasicView.register(app, subdomain="basic")
client = app.test_client()


def test_index_subdomain():
    # GET on the collection root dispatches to BasicView.index.
    resp = client.get("/basic/", base_url="http://basic.test.test")
    eq_(b"Index", resp.data)


def test_get():
    resp = client.get("/basic/1234/", base_url="http://basic.test.test")
    eq_(b"Get 1234", resp.data)
    # A missing trailing slash must redirect permanently to the canonical URL.
    resp = client.get("/basic/1234", base_url="http://basic.test.test")
    eq_(resp.status_code, 301)


def test_put():
    resp = client.put("/basic/1234/", base_url="http://basic.test.test")
    eq_(b"Put 1234", resp.data)
    resp = client.put("/basic/1234", base_url="http://basic.test.test")
    eq_(resp.status_code, 301)


def test_patch():
    resp = client.patch("/basic/1234/", base_url="http://basic.test.test")
    eq_(b"Patch 1234", resp.data)
    resp = client.patch("/basic/1234", base_url="http://basic.test.test")
    eq_(resp.status_code, 301)


def test_post():
    resp = client.post("/basic/", base_url="http://basic.test.test")
    eq_(b"Post", resp.data)


def test_delete():
    resp = client.delete("/basic/1234/", base_url="http://basic.test.test")
    eq_(b"Delete 1234", resp.data)
    resp = client.delete("/basic/1234", base_url="http://basic.test.test")
    eq_(resp.status_code, 301)


def test_custom_method():
    # Methods beyond the REST verbs are exposed under their own URL.
    resp = client.get("/basic/custom_method/",
                      base_url="http://basic.test.test")
    eq_(b"Custom Method", resp.data)


def test_custom_method_with_params():
    resp = client.get("/basic/custom_method_with_params/1234/abcd/",
                      base_url="http://basic.test.test")
    eq_(b"Custom Method 1234 abcd", resp.data)
    resp = client.get("/basic/custom_method_with_params/1234/abcd",
                      base_url="http://basic.test.test")
    eq_(resp.status_code, 301)


def test_routed_method():
    resp = client.get("/basic/routed/", base_url="http://basic.test.test")
    eq_(b"Routed Method", resp.data)


def test_multi_routed_method():
    # One method registered under two routes answers on both.
    resp = client.get("/basic/route1/", base_url="http://basic.test.test")
    eq_(b"Multi Routed Method", resp.data)
    resp = client.get("/basic/route2/", base_url="http://basic.test.test")
    eq_(b"Multi Routed Method", resp.data)


def test_no_slash():
    # A route declared without a trailing slash serves directly (no 301).
    resp = client.get("/basic/noslash", base_url="http://basic.test.test")
    eq_(b"No Slash Method", resp.data)
| 1,000 |
102 | import os
def check_env(env_var_name):
    """
    Check and return the type of an environment variable.

    supported types:
        None     -- the variable is unset (or the name was not a string)
        Integer  -- the value parses as an int
        String   -- any other value

    @param env_var_name: environment variable name
    @return: string of the type name.
    """
    try:
        val = os.getenv(env_var_name)
    except TypeError:
        # Non-string names cannot be looked up; treat them as unset,
        # matching the original behaviour of returning 'None'.
        return 'None'
    if val is None:
        return 'None'
    try:
        int(val)
    except ValueError:
        return 'String'
    return 'Integer'
| 233 |
103 | # coding: utf-8
from collections import defaultdict
# Definition for a binary tree node.
class TreeNode(object):
    """Node of a binary tree: a value plus left/right child links."""

    def __init__(self, x):
        # Children start empty; callers wire up left/right afterwards.
        self.val = x
        self.left = self.right = None
class Solution(object):
    """LeetCode 501: find all modes (most frequent values) in a binary tree."""

    def findMode(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        # Tally every value with an explicit-stack traversal; each node is
        # counted once, when it is first pushed.
        tally = defaultdict(int)
        pending = []
        node = root
        while node or pending:
            if node:
                pending.append(node)
                tally[node.val] += 1
                node = node.left
            else:
                node = pending.pop().right
        # Single pass over the tallies: reset the result whenever a higher
        # count appears, extend it on ties.
        best = -1
        modes = []
        for value, count in tally.items():
            if count > best:
                best = count
                modes = [value]
            elif count == best:
                modes.append(value)
        return modes
| 551 |
104 | import Const
import RunClassCommon
# Measured per-operation I/O cost vectors for the two devices under test.
OPT900P_costs = [0.0593, 0.0653, 0.0584, 0.0659, 0.0076, 0.0117, 0.0076, 0.0287]
OPTMem_costs = [0.1001, 0.4416, 0.0997, 0.4429, 0.0057, 0.0093, 0.0060, 0.0177]

# Single experiment: Intel 900P device, absolute cluster/leader sizing,
# scan-based flushing.  Buffer sizes are in bytes (16 GiB / 48 GiB).
experiment_list = [
    {
        'Experiment': Const.EXT_CP,
        'Setup': 'Intel900P_ECP_ClusterAbs_LeadAbs_Scan',
        'Flush_Method': Const.FLUSH_SCAN,
        'Recluster_Pct': 20,
        'Ins_Buffer_Size': 17179869184,
        'OS_Buffer_Size': 51539607552,
        'Cluster_Size_Strategy': Const.SIZE_ABSOLUTE,
        'Leader_Size_Strategy': Const.LEAD_SIZE_ABSOLUTE,
        'Leader_Target_Size': 100,
        'IO_Costs': OPT900P_costs,
        'Write_Iotrace': True,
        'Log_Name': 'Intel900P_ECP_ClusterAbs_LeadAbs_Scan.log'
    }
]

if __name__ == '__main__':
    RunClassCommon.main(experiment_list)
| 401 |
105 | import typing
from . import app
from quart import request
from ujson import dumps
from flask_babel import get_locale
from .models import TranslationUnits
@app.template_filter("get_unit")
def _get_unit_filter(unit):
    """Resolve a translation unit for the current request's locale."""
    return app.translations.get_unit(request.locale, unit)


@app.template_filter("format")
def _format_filter(str, **kwargs):
    # Thin wrapper around str.format for use inside templates.
    # NOTE(review): the parameter name shadows the builtin `str`; harmless
    # here but worth renaming if this filter is ever extended.
    return str.format(**kwargs)


@app.template_filter("dictjoin")
def _dict_join_filter(base, *args) -> dict:
    """Merge mappings into `base` (or into a list's first element) in place."""
    # NOTE(review): the list branch merges base[1:] and ignores *args, while
    # the dict branch merges *args — confirm this asymmetry is intended.
    if isinstance(base, list):
        [base[0].update(_base) for _base in base[1:]]
    elif isinstance(base, dict):
        [base.update(_base) for _base in args]
    return base


@app.template_filter("jsonify")
def _jsonify_filter(obj) -> dict:
    """Call .jsonify() on a single object, or on each object in a list."""
    if isinstance(obj, list):
        return [_obj.jsonify() for _obj in obj]
    else:
        return obj.jsonify()


@app.template_filter("unitjoin")
def _unit_join_filter(units: typing.List[TranslationUnits]) -> dict:
    """Index translation units by their language code."""
    res = {}
    for unit in units:
        res[unit.lang] = dict(
            translation=unit.translation,
            default=unit.default,
            id=unit.id,
            unit=unit.unit,
        )
    return res


@app.template_filter("keys")
def _keys_filter(item: dict) -> list:
    """Return the mapping's keys as a plain list (Jinja-friendly)."""
    return [*item.keys()]
| 496 |
106 | import ttarray.raw as raw
from .. import random_array,check_raw_ttslice_dense,calc_chi,DENSE_SHAPE
import numpy.linalg as la
import pytest
import copy
import itertools
# SHAPE_RECLUSTER=[
# ((2,3),[((),),((),(),()),((),())]),
# ((2,24,3),[((24,),),((1,),(24,)),((3,),(4,),(2,)),((2,),(2,),(2,),(3,)),((24,),(1,),(1,)),((3,),(2,),(2,),(1,),(2,))]),
# ((2,64,3),[((2,),(32,)),((2,),(2,),(2,),(2,),(2,),(2,)),((4,),(4,),(4,)),((2,),(4,),(4,),(2,)),((4,),(4,),(4,)),((2,),(2,),(2,),(2,),(2,),(2,))]),
# ((2,64,24,1),[((2,2),(32,12)),((2,1),(2,1),(2,1),(2,24),(2,1),(2,1)),((4,4),(4,2),(4,3)),((2,3),(1,8),(4,1),(4,1),(2,1)),((4,2),(4,2),(4,6)),((2,2),(2,2),(2,2),(2,3),(2,1),(2,1))]),
# ]
#
# @pytest.fixture(params=SHAPE_RECLUSTER)
# def shape_recluster(request):
# return request.param
def test_recluster(seed_rng):
    """Round-trip check: a dense array clustered as c1 can be re-clustered
    into any other clustering c2 without changing the represented data."""
    for shape, cls in DENSE_SHAPE.items():
        # Add non-trivial boundary dimensions on both ends.
        shape = (2,) + shape + (3,)
        ar = random_array(shape, float)
        for c1 in cls:
            ttar = raw.dense_to_ttslice(ar, c1, la.qr)
            check_raw_ttslice_dense(ttar, ar, c1, calc_chi(c1, 2, 3))
            for c2 in cls:
                # copy.copy: presumably recluster mutates its input in
                # place, so keep ttar reusable across iterations — confirm.
                ttar2 = raw.recluster(copy.copy(ttar), c2, raw.trivial_decomposition)
                check_raw_ttslice_dense(ttar2, ar, c2, None)
| 733 |
107 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 08:48:46 2020
@author: lukas and martina
"""
####
# This script imports the literature from Zotero. All items with the tag "*****" are called (see readme).
# Afterwards the literature information is saved in JSON format for later use
####
# Use one module for the API request and one for the JSON formatting
import urllib.request, json
# Variable for the literature data
# Accumulates every literature item fetched from the Zotero group library.
data_tot = []

# Call the Zotero web API: group 113737, items tagged "*****", at most 100.
with urllib.request.urlopen("https://api.zotero.org/groups/113737/items?tag=*****&limit=100") as url:
    data = json.loads(url.read().decode())
    for j in data:
        data_tot.append(j)

# Save the data as a javascript variable so a web page can load it with a
# plain <script src="zotero.js"> tag.
with open('zotero.js', 'w') as outfile:
    outfile.write('var dat_zot = ')
    json.dump(data_tot, outfile)
| 303 |
108 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-MCCS-EngineShared
GUID : bf460fc6-45c5-4119-add3-e361a6e7d5ac
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("bf460fc6-45c5-4119-add3-e361a6e7d5ac"), event_id=1, version=0)
class Microsoft_Windows_MCCS_EngineShared_1_0(Etw):
    # Event 1 payload: HResult, a narrow string, and an unsigned 32-bit value.
    pattern = Struct(
        "P1_HResult" / Int32sl,
        "P2_String" / CString,
        "P3_UInt32" / Int32ul
    )


@declare(guid=guid("bf460fc6-45c5-4119-add3-e361a6e7d5ac"), event_id=2, version=0)
class Microsoft_Windows_MCCS_EngineShared_2_0(Etw):
    # Event 2 payload: same layout as event 1.
    pattern = Struct(
        "P1_HResult" / Int32sl,
        "P2_String" / CString,
        "P3_UInt32" / Int32ul
    )


@declare(guid=guid("bf460fc6-45c5-4119-add3-e361a6e7d5ac"), event_id=3001, version=0)
class Microsoft_Windows_MCCS_EngineShared_3001_0(Etw):
    # Event 3001 payload: a single wide (UTF-16) string.
    pattern = Struct(
        "Prop_UnicodeString" / WString
    )
| 531 |
109 | from rest_framework import serializers
from sakila.models import Film
from sakila.models_views import CustomerList
class FilmSerializer(serializers.ModelSerializer):
    """Serializes every field of the sakila Film model."""

    class Meta:
        model = Film
        fields = '__all__'
        # Alternative explicit field list, kept for reference:
        # fields = ('title', 'description', 'release_year', 'language', 'original_language', 'replacement_cost', 'special_features', )
class CustomerListSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerList
fields = '__all__' | 167 |
110 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
    """Contains SKU in an ExpressRouteCircuit.

    :param name: The name of the SKU.
    :type name: str
    :param tier: The tier of the SKU. Possible values are 'Standard' and
     'Premium'. Possible values include: 'Standard', 'Premium'
    :type tier: str or
     ~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitSkuTier
    :param family: The family of the SKU. Possible values are: 'UnlimitedData'
     and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
    :type family: str or
     ~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitSkuFamily
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitSku, self).__init__(**kwargs)
        # All properties are optional keyword arguments (default None).
        self.name = kwargs.get('name', None)
        self.tier = kwargs.get('tier', None)
        self.family = kwargs.get('family', None)
| 520 |
111 | import neuroglancer
import numpy as np
import sys
import tifffile
import h5py
# RUN SCRIPT: python3 -i visualng.py
# General settings
# General settings
ip = 'localhost'  # or public IP of the machine for sharable display
port = 13333  # change to an unused port number
neuroglancer.set_server_bind_address(bind_address=ip, bind_port=port)

# Create a new viewer. This starts a webserver in a background thread,
# which serves a copy of the Neuroglancer client.
viewer = neuroglancer.Viewer()

file = 'outputs/neuroglancer.h5'
# Voxel resolution of the data; NOTE(review): presumably (x, y, z) in nm —
# confirm against how the volume was written.
res = [4, 4, 40];

# Adapting viewer config (disabled example)
'''
with viewer.txn() as s:
    s.layout = '3d'
    s.projection_scale = 3000
'''

# Image section: load a sub-volume and swap axes 0 and 2 so the array is
# ordered the way the viewer expects.
print('load image')
image = h5py.File(file, 'r')
tmp_im = np.array(image['image'][:100, :1000, :1000])
data_im = np.swapaxes(tmp_im, 0, 2)
with viewer.txn() as s:
    s.layers.append(
        name='image',
        layer=neuroglancer.LocalVolume(
            data=data_im,
            # data=np.array(image['main'][:100, :1000, :1000]),
            # voxel_size=res,
            volume_type='image'
        ))
    # s.projection_scale=2000000000

# Segmentation section: same sub-volume and axis swap for the labels.
print('load segmentation')
image = h5py.File(file, 'r')
tmp_gt = np.array(image['label'][:100, :1000, :1000])
data_gt = np.swapaxes(tmp_gt, 0, 2)
with viewer.txn() as s:
    s.layers.append(
        name = 'segmentation',
        layer = neuroglancer.LocalVolume(
            data = data_gt,
            voxel_size = res,
            volume_type = "segmentation"
        ))
    s.layout = '3d'

# Print the viewer URL so it can be opened in a browser.
print(viewer)
#print(neuroglancer.to_url(viewer.state)) | 724 |
112 | import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `ticktextsrc` property of scatter3d.marker.colorbar."""

    def __init__(
        self,
        plotly_name='ticktextsrc',
        parent_name='scatter3d.marker.colorbar',
        **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop('edit_type', 'none')
        role = kwargs.pop('role', 'info')
        super(TicktextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 250 |
113 | #=========================================================================
# Python wrapper to create a power curve for a particular design
#=========================================================================
#=========================================================================
# Import modules
#=========================================================================
from calc_power import calc_power
#=========================================================================
# Create dictionaries for easy data handling
#=========================================================================
# Path to the PrasadUM working copy used by calc_power for its inputs/outputs.
path = '/home/yosemite/Dropbox/PrasadUM/'

# Flight condition parameters
Flight = {'V': 240, 'alt': 1500, 'rho': 0.002} # knots, m, slug.cu.ft

# Vehicle design: gross weight (lbs) and flat-plate drag area (sq.ft).
Airframe = {'Wt': 22036, 'f': 18.0}

# Wing design: aspect ratio and wing lift fraction.
Wing = {'AR': 9, 'fw': 0.8}

# Rotor design: blade count, radius (ft), lift offset, solidity,
# advancing-tip Mach limit.  Vtiph is in m/s.
Rotor = { 'Nb': 4, 'Rft': 23.3, 'loff': 0.0, 'sigma': 0.1, 'Mtip': 0.8,
          'Vtiph': 240.0 , 'flap_freq': 1.1, 'NR': 2}

# Blade properties: twist (nose down, deg), flap freq / rev (hover),
# blade mass in kg (each blade).
Blade = {'twist': 4.0, 'flap_freq': 1.1, 'mass': 70.0}

# Pack all dictionaries into one outer dict for the solver.
Aircraft = {'Rotor': Rotor, 'Airframe': Airframe, 'Wing': Wing,
            'Flight': Flight, 'Blade': Blade }

# Call power curve calculator (airspeed loop inside!); results go under
# PowerCurve/ relative to `path`.
calc_power(Aircraft, path, 'PowerCurve/')
| 678 |
114 | import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `xanchor` property of layout.image."""

    def __init__(self, plotly_name="xanchor", parent_name="layout.image", **kwargs):
        # Resolve overridable defaults before handing off to the base class.
        edit_type = kwargs.pop("edit_type", "arraydraw")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", ["left", "center", "right"])
        super(XanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
| 239 |
115 | # Program to transpose a matrix using a nested loop
# Transpose a matrix.  The source matrix:
X = [[12, 7],
     [4, 5],
     [3, 8]]

# Build the transpose with dimensions derived from X itself, so this works
# for any rectangular matrix (the original hard-coded a 2x3 zero matrix).
result = [[X[i][j] for i in range(len(X))] for j in range(len(X[0]))]

# Show one row of the transpose per line.
for r in result:
    print(r)
| 140 |
116 | # -*- coding: utf-8 -*-
import os
from flask import Flask, request, jsonify
import FaceProcessing
from faces import save_embedding
# Initialize the Flask application
# Initialize the Flask application
app = Flask(__name__)

# Base directory for bundled resources; overridable via RUNTIME_BASEDIR.
BASEDIR = os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__)))

# route http posts to this method
@app.route('/api/embedding', methods=['POST'])
def embedding():
    """Compute a face embedding for the base64-encoded image in the POST body."""
    embedding = FaceProcessing.FaceProcessingBase64ImageData2(request.data)
    embedding_str = save_embedding.convert_embedding_to_string(embedding)
    print(embedding_str)
    return jsonify({'embedding':embedding_str}), 200

if __name__ == '__main__':
    # Load the model once, then run a throwaway inference on a bundled image
    # to warm it up before serving requests.
    FaceProcessing.init_embedding_processor()
    print("start to warm up")
    embedding = FaceProcessing.FaceProcessingImageData2(os.path.join(BASEDIR,"image","Mike_Alden_0001_tmp.png"))
    print("warmed up")
    print(embedding)
    app.run(host="0.0.0.0", port=6000)
| 326 |
117 | #Desafio 012 -> Faça um algoritmo que leia o preço de um produto
# e mostre seu novo preço,com 5% de desconto
# Challenge 012: read a product's price and print it with a 5% discount.
preco = float(input('Quanto custa o produto?'))
desconto = preco / 20  # 5% of the price (1/20)
pdesconto = preco - desconto
print('Com um desconto de 5%, o novo valor do produto passa a ser R${}'.format(pdesconto))
# Alternative: compute the discounted price in a single expression (x * 0.95).
preco2 = float(input('Quanto custa o produto?'))
print('Com um desconto de 5%, o novo valor do produto passa a ser R${}'.format(preco2*0.95)) | 190 |
118 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from quantum.plugins.linuxbridge.common import config # noqa
from quantum.tests import base
class ConfigurationTest(base.BaseTestCase):
    """Checks the linuxbridge plugin's registered configuration defaults."""

    def test_defaults(self):
        self.assertEqual(2,
                         cfg.CONF.AGENT.polling_interval)
        self.assertEqual('sudo',
                         cfg.CONF.AGENT.root_helper)
        self.assertEqual('local',
                         cfg.CONF.VLANS.tenant_network_type)
        # Range/mapping options default to empty collections.
        self.assertEqual(0,
                         len(cfg.CONF.VLANS.network_vlan_ranges))
        self.assertEqual(0,
                         len(cfg.CONF.LINUX_BRIDGE.
                             physical_interface_mappings))
| 530 |
119 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test QiSrc Grep """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import py
import qisrc.git
def setup_projects(qisrc_action):
    """ Create two git projects with one match for the "spam" pattern """
    foo_proj = qisrc_action.create_git_project("foo")
    bar_proj = qisrc_action.create_git_project("bar")
    foo_path = py.path.local(foo_proj.path)  # pylint:disable=no-member
    foo_git = qisrc.git.Git(foo_proj.path)
    _bar_git = qisrc.git.Git(bar_proj.path)
    # Only foo contains the pattern; the file is staged but not committed.
    foo_path.join("a.txt").write("this is spam\n")
    foo_git.add("a.txt")


def test_all_by_default(qisrc_action, record_messages):
    """ Grep searches every project of the worktree by default """
    setup_projects(qisrc_action)
    record_messages.reset()
    rc = qisrc_action("grep", "spam", retcode=True)
    assert rc == 0
    assert record_messages.find("foo")
    assert record_messages.find("bar")
    assert record_messages.find("this is spam")


def test_using_projects(qisrc_action):
    """ Restricting grep with -p searches only the given project """
    setup_projects(qisrc_action)
    rc = qisrc_action("grep", "-p", "foo", "spam", retcode=True)
    assert rc == 0
    # bar has no match, so git grep exits non-zero.
    rc = qisrc_action("grep", "-p", "bar", "spam", retcode=True)
    assert rc == 1


def test_using_git_grep_options(qisrc_action, record_messages):
    """ Options after "--" are forwarded to git grep (-i -l here) """
    setup_projects(qisrc_action)
    rc = qisrc_action("grep", "--", "-i", "-l", "Spam", retcode=True)
    assert rc == 0
    assert record_messages.find("a.txt")


def test_worktree_paths(qisrc_action, record_messages):
    """ --path worktree prints matches relative to the worktree root """
    setup_projects(qisrc_action)
    _rc = qisrc_action("grep", "--path", "worktree", "--", "-i", "-l", "Spam", retcode=True)
    assert record_messages.find("foo/a.txt")
| 773 |
def polishNotation(exp: list):
    """
    Evaluate a mathematical expression in reverse Polish notation.

    The expression arrives as a list of string tokens (numbers and the
    operators +, -, x, /).  A stack holds the intermediate results until
    every operation has been applied.

    @param exp: list of tokens in RPN order
    @return: the result of the expression, as a string
    """
    # Map each operator token to its integer operation.  Division keeps the
    # original behaviour: true division, then truncation toward zero.
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        'x': lambda a, b: a * b,
        '/': lambda a, b: int(a / b),
    }
    stack = []
    for token in exp:
        if isOperator(token):
            # Pop operands in reverse: the top of the stack is the right one.
            n2 = stack.pop()
            n1 = stack.pop()
            stack.append(str(operations[token](int(n1), int(n2))))
        else:
            stack.append(token)
    return stack[0]


def isOperator(ch):
    """Return True if the token is one of the supported operators."""
    # Tuple membership compares whole tokens, so '-11' is (correctly) a number.
    return ch in ('+', '-', 'x', '/')
# Test: evaluate a few sample RPN expressions and print each alongside
# its result.
expression = ['1', '5', 'x']
print(expression)
print(polishNotation(expression))
expression = ['2', '1', '+', '3', 'x']
print(expression)
print(polishNotation(expression))
expression = ['4', '13', '5', '/', '+']
print(expression)
print(polishNotation(expression))
# Includes a negative number ('-11') to show it is not mistaken for the
# subtraction operator.
expression = ['10', '6', '9', '3', '+', '-11', 'x', '/', 'x', '17', '+', '5', '+']
print(expression)
print(polishNotation(expression)) | 633 |
121 | import time
from sqlalchemy import BIGINT, BOOLEAN, INT, Column, String, Table
from sqlalchemy.sql.expression import select
from mayday.db.tables import BaseModel
from mayday.objects.user import User
class UsersModel(BaseModel):
    """Table gateway for the `users` table: profile lookup, auth flags, bans."""

    def __init__(self, engine, metadata, role='reader'):
        # Telegram-style user record: `user_id` is the external unique key,
        # `id` the internal surrogate key.
        table = Table(
            'users',
            metadata,
            Column('id', INT, primary_key=True, autoincrement=True),
            Column('user_id', BIGINT, unique=True),
            Column('username', String(255)),
            Column('first_name', String(255), nullable=True),
            Column('last_name', String(255), nullable=True),
            Column('language_code', String(16)),
            Column('is_admin', BOOLEAN, default=False),
            Column('is_blacklist', BOOLEAN, default=False),
            Column('is_bot', BOOLEAN, default=False),
            Column('created_at', BIGINT),
            Column('updated_at', BIGINT),
            extend_existing=True)
        super().__init__(engine, metadata, table, role)

    def _auth(self, user: User):
        # Fetch only the two authorisation flags for this user.
        stmt = select([self.table.c.is_blacklist, self.table.c.is_admin]).where(self.table.c.user_id == user.user_id)
        row = self.execute(stmt).fetchone()
        if row:
            return dict(zip(['is_blacklist', 'is_admin'], row))
        # No row: the user has never been seen before.
        return None

    def get_user_profile(self, user_id: int) -> User:
        """Return the stored User for `user_id`, or None when unknown."""
        stmt = select(['*']).where(self.table.c.user_id == user_id)
        row = self.execute(stmt).fetchone()
        if row:
            user_profile = dict(zip([col.key for col in self.table.columns], row))
            return User(user_profile=user_profile)
        return None

    def auth(self, user: User) -> dict:
        """Return the user's auth flags, inserting the row on first contact."""
        # check isExisted and blacklist
        auth_result = self._auth(user)
        if auth_result:
            # Known user: refresh `updated_at` and return the stored flags.
            self.raw_update(self.table.c.user_id == user.user_id, dict(updated_at=int(time.time())))
            return auth_result
        # First contact: insert the profile, then read its flags back.
        self.raw_upsert(user.to_dict())
        return self._auth(user)

    def ban_user(self, user: User):
        """Blacklist the user; returns the number of rows updated."""
        return self.raw_update(self.table.c.user_id == user.user_id, dict(is_blacklist=True)).rowcount
| 956 |
122 | # -*- coding: utf-8 -*-
import os
import shutil
import unittest
import optparse
import anadama2.document
class TestPweaveDocument(unittest.TestCase):
    """Tests for PweaveDocument's zero-row / zero-column filtering helpers."""

    def test_filter_zero_rows(self):
        # The all-zero middle row (and its name "s2") should be dropped.
        doc = anadama2.document.PweaveDocument()
        names=["s1","s2","s3"]
        data=[[0,0,1],[0,0,0],[1,0,0]]
        filtered_names, filtered_data = doc.filter_zero_rows(names,data)
        self.assertEqual(filtered_names,["s1","s3"])
        for x,y in zip(filtered_data, [[0,0,1],[1,0,0]]):
            self.assertListEqual(x,y)

    def test_filter_zero_rows_no_zeros(self):
        # No row is all-zero, so names and data pass through unchanged.
        doc = anadama2.document.PweaveDocument()
        names=["s1","s2","s3"]
        data=[[0,0,1],[0,1,0],[1,0,0]]
        filtered_names, filtered_data = doc.filter_zero_rows(names,data)
        self.assertEqual(filtered_names,["s1","s2","s3"])
        for x,y in zip(filtered_data, [[0,0,1],[0,1,0],[1,0,0]]):
            self.assertListEqual(x,y)

    def test_filter_zero_columns(self):
        # The all-zero middle column (and its name "s2") should be dropped.
        doc = anadama2.document.PweaveDocument()
        names=["s1","s2","s3"]
        data=[[0,0,1],[0,0,0],[1,0,0]]
        filtered_names, filtered_data = doc.filter_zero_columns(names,data)
        self.assertEqual(filtered_names,["s1","s3"])
        for x,y in zip(filtered_data, [[0,1],[0,0],[1,0]]):
            self.assertListEqual(x,y)

    def test_filter_zero_columns_no_zeros(self):
        # No column is all-zero, so names and data pass through unchanged.
        doc = anadama2.document.PweaveDocument()
        names=["s1","s2","s3"]
        data=[[0,0,1],[0,1,0],[1,0,0]]
        filtered_names, filtered_data = doc.filter_zero_columns(names,data)
        self.assertEqual(filtered_names,["s1","s2","s3"])
        for x,y in zip(filtered_data,[[0,0,1],[0,1,0],[1,0,0]]):
            self.assertListEqual(x,y)

if __name__ == "__main__":
    unittest.main()
| 986 |
123 | # Copyright 2013-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages, setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of `fname`, resolved relative to this setup.py."""
    # Lets long-description-style content live as a regular top-level file
    # instead of a raw string below.
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

# Daemon entry points installed onto every cluster node.
console_scripts = [
    "sqswatcher = sqswatcher.sqswatcher:main",
    "nodewatcher = nodewatcher.nodewatcher:main",
    "jobwatcher = jobwatcher.jobwatcher:main",
]

version = "2.6.1"
requires = ["boto3>=1.7.55", "retrying>=1.3.3", "paramiko>=2.4.2"]

setup(
    name="aws-parallelcluster-node",
    version=version,
    author="Amazon Web Services",
    description="aws-parallelcluster-node provides the scripts for an AWS ParallelCluster node.",
    url="https://github.com/aws/aws-parallelcluster-node",
    license="Apache License 2.0",
    packages=find_packages("src", exclude=["tests"]),
    package_dir={"": "src"},
    python_requires=">=3.5",
    install_requires=requires,
    entry_points=dict(console_scripts=console_scripts),
    include_package_data=True,
    zip_safe=False,
    package_data={"": ["examples/config"]},
    long_description=(
        "aws-parallelcluster-node is the python package installed on the Amazon EC2 instances launched "
        "as part of AWS ParallelCluster."
    ),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Programming Language :: Python",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: Apache Software License",
    ],
)
| 745 |
124 | """
This is a collection of tags and filters for :class:`~integreat_cms.cms.models.push_notifications.push_notification.PushNotification`
objects.
"""
from django import template
register = template.Library()
@register.simple_tag
def get_translation(push_notification, language_slug):
    """
    This tag returns the most recent translation of the requested push notification in the requested language.

    :param push_notification: The requested push notification
    :type push_notification: ~integreat_cms.cms.models.push_notifications.push_notification.PushNotification

    :param language_slug: The slug of the requested language
    :type language_slug: str

    :return: The push notification translation, or None if no translation
        exists for the given language (``.first()`` on an empty queryset)
    :rtype: ~integreat_cms.cms.models.push_notifications.push_notification_translation.PushNotificationTranslation
    """
    return push_notification.translations.filter(language__slug=language_slug).first()
| 264 |
125 | import base64
from django.contrib.auth.models import AnonymousUser
from django.conf import settings
from django.utils.crypto import constant_time_compare
from rest_framework import authentication
from rest_framework import exceptions
class SettingsAuthentication(authentication.BaseAuthentication):
    """
    Authenticates a request against a single shared key (``settings.API_KEY``)
    supplied as the username part of an HTTP Basic ``Authorization`` header.

    Returns ``(AnonymousUser, None)`` on success, ``None`` when the scheme
    does not apply, and raises ``AuthenticationFailed`` on a wrong key.
    """

    def authenticate(self, request):
        has_api_key = hasattr(settings, 'API_KEY')
        using_basic_auth = 'HTTP_AUTHORIZATION' in request.META
        if using_basic_auth and has_api_key:
            auth = request.META['HTTP_AUTHORIZATION'].split()
            if len(auth) == 2 and auth[0].lower() == "basic":
                # BUGFIX: b64decode returns bytes on Python 3, so the result
                # must be decoded to str before splitting on ':' — otherwise
                # bytes.split(':') raises TypeError.
                decoded = base64.b64decode(auth[1]).decode('utf-8', 'replace')
                key = decoded.split(':')[0]
                # Constant-time comparison avoids timing side channels.
                if constant_time_compare(settings.API_KEY, key):
                    request._salmon_allowed = True
                    return (AnonymousUser, None)
                else:
                    raise exceptions.AuthenticationFailed('No such user')
        return None
| 400 |
126 | import gc
import torch
from .utils import *
# === Import model-related objects ===
from comvex.coatnet import CoAtNetConfig, CoAtNetWithLinearClassifier
# === Instantiate your Model ===
# - For specializations
# - For specializations
# Collect the named CoAtNet specializations exposed on the config class.
specializations = [attr for attr in dir(CoAtNetConfig) if attr.startswith("CoAtNet")]
specializations = specializations[:2]  # Avoid too large specializations

# === Settings ===
# - Required:
input_shape = (1, 3, 224, 224)
expected_shape = (1, 10)
# - Optional: alternate input sizes exercised by the shape tests below.
input_shape_larger = (1, 3, 384, 384)
input_shape_rect = (1, 3, 280, 336)
kwargs = {}
kwargs['num_classes'] = 10

# === Test Cases ===
# Default test for specializations
def test_forward():
    for spec in specializations:
        print(spec)
        config = getattr(CoAtNetConfig, spec)(**kwargs)
        model = CoAtNetWithLinearClassifier(config)
        model.eval()
        x = torch.randn(input_shape)
        out = model(x)
        assert_output_shape_wrong(out, expected_shape)
        assert_output_has_nan(out)
        # Free the model eagerly to keep peak memory down across specs.
        del model
        gc.collect()

# Test when the input size gets larger at the inference time
def test_larger_shape_at_inference():
    for spec in specializations:
        print(spec)
        config = getattr(CoAtNetConfig, spec)(**kwargs)
        model = CoAtNetWithLinearClassifier(config)
        model.eval()
        x = torch.randn(input_shape_larger)
        out = model(x)
        assert_output_shape_wrong(out, expected_shape)
        assert_output_has_nan(out)
        del model
        gc.collect()
# Test when the input size isn't square at the inference time
def test_rect_shape_at_inference():
for spec in specializations:
print(spec)
config = getattr(CoAtNetConfig, spec)(**kwargs)
model = CoAtNetWithLinearClassifier(config)
model.eval()
x = torch.randn(input_shape_rect)
out = model(x)
assert_output_shape_wrong(out, expected_shape)
assert_output_has_nan(out)
del model
gc.collect() | 815 |
127 | import logging
import controller
def main():
    """Run the example pipeline: format the input, process it, format output."""
    logging.info("Running example")
    formatted_input = controller.format_input("hello")
    processed_data = controller.procces_data(formatted_input)
    formatted_output = controller.format_output(processed_data)
    logging.info("Done and done")
if __name__ == "__main__":
loglevel = logging.INFO
logging.basicConfig(
format="%(asctime)s |%(levelname)s: %(message)s", level=loglevel
)
main() | 172 |
128 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from django.http import QueryDict
from django.utils.encoding import smart_str
class UnpjaxMiddleware(object):
    """
    Removes the `_pjax` parameter from query string
    """

    def process_request(self, request):
        # Only touch requests whose query string actually carries `_pjax`.
        raw_qs = request.META.get("QUERY_STRING", "")
        if "_pjax" not in raw_qs:
            return
        params = QueryDict(raw_qs, encoding=request.encoding, mutable=True)
        params.pop("_pjax", None)
        request.META["QUERY_STRING"] = smart_str(params.urlencode())
| 273 |
129 | # -*- coding: utf-8 -*-
# *******************************************************
# ____ _ _
# / ___|___ _ __ ___ ___| |_ _ __ ___ | |
# | | / _ \| '_ ` _ \ / _ \ __| | '_ ` _ \| |
# | |__| (_) | | | | | | __/ |_ _| | | | | | |
# \____\___/|_| |_| |_|\___|\__(_)_| |_| |_|_|
#
# Sign up for free at http://www.comet.ml
# Copyright (C) 2015-2020 Comet ML INC
# This file can not be copied and/or distributed without
# the express permission of Comet ML Inc.
# *******************************************************
| 237 |
130 | import os
import yaml
import argparse
from datetime import datetime
import torch
import random
import metaworld
from garage.experiment.deterministic import set_seed
from src.env import make_env
from src.algorithm import SAC
from src.agent import Agent
def run(args):
    """Train a SAC agent on the requested Meta-World/garage environment.

    Loads the YAML config, builds train/test environments, instantiates the
    algorithm selected by ``args.algo`` and runs the agent's training loop.
    """
    set_seed(args.seed)
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
    # CLI override for the number of environment steps, when given.
    if args.num_steps is not None:
        config['Agent']['num_steps'] = args.num_steps

    # Create environments. Meta-World MT1 envs need an explicit task.
    task = None
    if args.env_id in metaworld.MT1.ENV_NAMES:
        task = random.choice(metaworld.MT1(args.env_id).train_tasks)
    env = make_env(args.env_id, task)
    test_env = make_env(args.env_id, task)

    # Device to use.
    device = torch.device(
        "cuda" if args.cuda and torch.cuda.is_available() else "cpu")

    # Specify the directory to log.
    time = datetime.now().strftime("%Y%m%d-%H%M")
    log_dir = os.path.join(
        'logs', args.env_id, f'{args.algo}-seed{args.seed}-{time}')

    if args.algo == 'sac':
        # SAC algorithm.
        algo = SAC(
            state_dim=env.observation_space.shape[0],
            action_dim=env.action_space.shape[0],
            device=device, seed=args.seed, **config['SAC'])
    else:
        # BUG FIX: the old message suggested a nonexistent "--algo src"
        # option; 'sac' is the only wired-up algorithm (and the only
        # argparse choice).
        raise Exception('You need to set "--algo sac".')

    agent = Agent(
        env=env, test_env=test_env, algo=algo, log_dir=log_dir,
        device=device, seed=args.seed, **config['Agent'])
    agent.run()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # YAML file holding the SAC/Agent hyper-parameters.
    parser.add_argument(
        '--config', type=str, default=os.path.join('config', 'metaworld.yaml'))
    # Optional override of config['Agent']['num_steps'].
    parser.add_argument('--num_steps', type=int, required=False)
    parser.add_argument('--env_id', type=str, default='bin-picking-v1')
    parser.add_argument('--algo', choices=['sac'], default='sac')
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--seed', type=int, default=0)
    args = parser.parse_args()
    run(args)
| 853 |
131 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    # Assemble the shared creature template for the Durgur Pyne NPC
    # (Zabrak base male name entry in the npc_name STF table).
    result = Creature()
    result.template = "object/mobile/shared_dressed_durgur_pyne.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","zabrak_base_male")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
132 | import plotly.plotly as py
from plotly.graph_objs import *
from datetime import datetime
import src.lib.debug as debug
import paho.mqtt.client as mqtt
# Plotly streaming tokens for the two live traces
# (stream1: luminosity, stream2: temperature).
stream1 = Stream(
    token='8bwbzgze6l',
    maxpoints=50000,
)
stream2 = Stream(
    token='d2gchv6j41',
    maxpoints=50000,
)
trace1 = Scatter(
    x=[],
    y=[],
    stream=stream1
)
trace2 = Scatter(
    x=[],
    y=[],
    stream=stream2
)
data = Data([trace1, trace2])
# Create/refresh the plot once; the Stream objects below push points into it.
py.plot(data)
st1 = py.Stream('8bwbzgze6l')
st2 = py.Stream('d2gchv6j41')
class myStruct():
    """Mutable accumulator for averaged sensor readings."""
    def __init__(self):
        # running luminosity sum, running temperature sum, sample count
        self.lum = self.temp = self.count = 0
#
# The debug function
#
def debug_print(level, s):
    # Tag every message with this module's name before delegating.
    debug.debug_print(level, 'sensing|temp_bright_plot: ' + s)
def on_message(mqttc, app_data, msg):
    """MQTT message callback: accumulate readings, stream 6-sample averages.

    ``app_data`` is the ``myStruct`` accumulator registered as mqtt userdata.
    Expects payloads whose first field is 'light', with the luminosity in
    field 1 and the temperature in field 3 — TODO confirm layout with sender.
    """
    data = msg.payload
    if not data:
        debug_print(debug.WARN, "Received no data...")
        return
    # NOTE(review): paho delivers payload as bytes on Python 3; the str
    # comparison below assumes Python 2 semantics — confirm target version.
    split = data.split()
    if (split[0] != 'light'):
        debug_print(debug.WARN, "Received invalid data: " + data)
        return
    app_data.lum = app_data.lum + int(split[1])
    app_data.temp = app_data.temp + int(split[3])
    count = app_data.count
    debug_print(debug.INFO, "Lum[" + repr(count) + "] = " + split[1] + " Temp[" + repr(count) + "] = " + split[3])
    app_data.count = app_data.count + 1
    # Every 6th sample, push the averages with an HH:MM timestamp and reset.
    if (app_data.count == 6):
        lum = app_data.lum / 6
        temp = app_data.temp / 6
        now = datetime.now().strftime("%H:%M")
        debug_print(debug.INFO, "Lum: " + repr(lum) + " Temp: " + repr(temp) + " " + now)
        st1.write(dict(y=lum, x=now))
        st2.write(dict(y=temp, x=now))
        app_data.lum = 0
        app_data.temp = 0
        app_data.count = 0
def on_subscribe(mqttc, app_data, mid, qos):
    # Called by paho once the broker acknowledges the subscription.
    debug_print(debug.INFO, "Subscribe successful")
def on_connect(mqttc, app_data, flags, rc):
    # Subscribing here (not at startup) means a reconnect re-subscribes too.
    debug_print(debug.INFO, "Connection to mqtt broker successful. Subscribing...")
    mqttc.subscribe("sensing/room1/temp_n_bright")
#
# Plotting class for temperature and brightness
#
def main():
    """Connect to the MQTT broker and stream averaged readings to plotly.

    BUG FIX: this was declared as ``class main():``, which executed the whole
    body at import time (class bodies run when defined) and made the
    ``main()`` call below merely instantiate an empty class. A function
    defers the work to the explicit call, as the guard below intends.
    """
    st1.open()
    st2.open()
    app_data = myStruct()
    mqttc = mqtt.Client(userdata=app_data)
    mqttc.on_message = on_message
    mqttc.on_connect = on_connect
    mqttc.on_subscribe = on_subscribe
    mqttc.connect("127.0.0.1", 1883, 60)
    # To limit the size of logfile
    global debugCount
    debugCount = 0
    # Blocks forever, dispatching MQTT callbacks.
    mqttc.loop_forever()
    st1.close()
    st2.close()
# This prevents main from executing if this is being imported as a module #
if __name__ == "__main__":
    main()
| 997 |
133 | """
WSGI config for StoreApp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Django locates its settings via this env var; set a default before the
# application object is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "StoreApp.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi).
application = get_wsgi_application()
| 129 |
134 | # TODO: Copy all of your 03-Colors.py program and put it below this comment.
# TODO One way to do so is:
# TODO 1. Inside 03-Colors.py, do:
# TODO -- Control-A (to SELECT the entire contents of the file, then
# TODO -- Control-C (to COPY that entire selection)
# TODO 2. Inside this file:
# TODO -- Click below this comment, then
# TODO -- Control-V (to PASTE the copied code into this file.
# TODO: In this module we'll start drawing a simple smiley face
# Yellow circle for the head
# Two black circle eyes
# Red rectangle (rect) mouth
# Red circle nose. | 233 |
135 | from sklearn import svm
import numpy as np
import time
start_time = time.time()
X_train = []
Y_train = []
X_test = []
Y_test = []
#reading non violent video features
for i in range(1,130):
try:
file_name = 'violent_features_NON_VIOLENT/nonvio_'+str(i)+'.txt'
file_obj = open(file_name,'r')
vif = np.loadtxt(file_obj)
if vif.shape[0] == 630:# avoiding hd videos
continue
if i < 92:
X_train.append(vif)
Y_train.append(0)
else:
X_test.append(vif)
Y_test.append(0)
file_obj.close()
except:
continue
print 'error in reading nonvio_%d.txt'%i
#reading violent video features
for i in range(1,130):
try:
file_name = 'violent_features_VIOLENT/vio_'+str(i)+'.txt'
file_obj = open(file_name,'r')
vif = np.loadtxt(file_obj)
if vif.shape[0] == 630:# avoiding hd videos
continue
if i < 92:
X_train.append(vif)
Y_train.append(1)
else:
X_test.append(vif)
Y_test.append(1)
file_obj.close()
except:
continue
print 'error in reading vio_%d.txt'%i
#training
clf = svm.SVC(kernel = 'linear')
clf.fit(X_train,Y_train)
print clf
print("--- %s seconds ---" % (time.time() - start_time))
#predicting
pred = []
for i in X_test:
pred.append(clf.predict(i.reshape(1,-1)))
count = 0
for i in range(0,len(Y_test)):
if pred[i][0] == Y_test[i]:
count = count + 1
print 'accuracy is : '+str(float(count)/len(Y_test))
| 804 |
136 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.cumsum import CumSum
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
class CumSumFrontExtractor(FrontExtractorOp):
    """ONNX front extractor for CumSum: reads the optional integer
    attributes ``exclusive`` and ``reverse`` (both default 0) and stores
    them on the node."""
    op = 'CumSum'
    enabled = True

    @classmethod
    def extract(cls, node):
        exclusive = onnx_attr(node, 'exclusive', 'i', 0)
        reverse = onnx_attr(node, 'reverse', 'i', 0)
        CumSum.update_node_stat(node, {'exclusive': exclusive, 'reverse': reverse})
        return cls.enabled
| 238 |
137 | # std
import logging
from typing import List, Optional
# project
from . import LogHandler
from ..parsers.block_parser import BlockParser
from .condition_checkers import BlockConditionChecker
from .condition_checkers.found_blocks import FoundBlocks
from .daily_stats.stats_manager import StatsManager
from src.notifier import Event
class BlockHandler(LogHandler):
    """Log handler for full-node "found block" activity.

    Parses incoming log text into block messages and feeds every message
    through each registered condition checker, collecting raised events.
    """

    def __init__(self, prefix):
        self._parser = BlockParser(prefix)
        self._cond_checkers: List[BlockConditionChecker] = [FoundBlocks()]

    def handle(self, logs: str, stats_manager: Optional[StatsManager] = None) -> List[Event]:
        """Parse ``logs``, update stats, and return the notable events."""
        messages = self._parser.parse(logs)
        if stats_manager:
            stats_manager.consume_block_messages(messages)
        if len(messages) > 0:
            # Currently not generating keep-alive events for the full node
            logging.debug(f"Parsed {len(messages)} block found messages")
        # Keep only truthy checker results, preserving message order.
        return [
            event
            for message in messages
            for checker in self._cond_checkers
            if (event := checker.check(message))
        ]
| 556 |
138 | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
# NOTE: generated file (see header banner) — edits here will be overwritten.
class SetContactSignUpNotification(TLObject): # type: ignore
    """Telegram API method.
    Details:
        - Layer: ``123``
        - ID: ``0xcff43f61``
    Parameters:
        silent: ``bool``
    Returns:
        ``bool``
    """
    __slots__: List[str] = ["silent"]
    ID = 0xcff43f61
    QUALNAME = "functions.account.SetContactSignUpNotification"

    def __init__(self, *, silent: bool) -> None:
        self.silent = silent  # Bool

    @staticmethod
    def read(data: BytesIO, *args: Any) -> "SetContactSignUpNotification":
        # No flags
        silent = Bool.read(data)
        return SetContactSignUpNotification(silent=silent)

    def write(self) -> bytes:
        data = BytesIO()
        # Serialize the constructor ID first, then the fields in order.
        data.write(Int(self.ID, False))
        # No flags
        data.write(Bool(self.silent))
        return data.getvalue()
| 797 |
139 | #!/usr/bin/env python
import angr
import claripy
# Addresses for the target binary, rebased to base_address.
base_address = 0x00100000
success_address = 0x0010111d
failure_address = 0x00101100
flag_length = 15
project = angr.Project("./a.out",main_opts = {"base_addr":base_address})
# One 8-bit symbolic variable per flag character.
flag_chars = [ claripy.BVS(f"flag_char{i}",8) for i in range(flag_length) ]
flag = claripy.Concat( *flag_chars + [claripy.BVV(b"\n")]) #making stdin work
state = project.factory.full_init_state(
    args = ["./a.out"],
    add_options = angr.options.unicorn,
    stdin=flag
)
# Constrain every flag byte to printable ASCII ('!'..'~').
for c in flag_chars:
    state.solver.add(c >= ord("!"))
    state.solver.add(c <= ord("~"))
sim_manager = project.factory.simulation_manager(state)
sim_manager.explore(find = success_address,avoid = failure_address)
if len(sim_manager.found) > 0:
    for found in sim_manager.found:
        # Dump what was fed to stdin (fd 0) along each successful path.
        print(found.posix.dumps(0))
| 358 |
140 | import pytest
import selenium.webdriver
import json
@pytest.fixture(scope='function')
def config():
    """Load config.json and validate the fields the browser fixture needs."""
    with open('config.json') as config_file:
        data = json.load(config_file)
    # Fail fast on unsupported or nonsensical settings.
    assert data['browser'] in ['Firefox', 'Chrome', 'Headless Chrome']
    assert isinstance(data['implicit_wait'], int)
    assert data['implicit_wait'] > 0
    return data
@pytest.fixture
def browser(config):
    """Yield a configured WebDriver and quit it once the test is done."""
    name = config['browser']
    if name == 'Firefox':
        driver = selenium.webdriver.Firefox()
    elif name == 'Chrome':
        driver = selenium.webdriver.Chrome()
    elif name == 'Headless Chrome':
        opts = selenium.webdriver.ChromeOptions()
        opts.add_argument('headless')
        driver = selenium.webdriver.Chrome(options=opts)
    else:
        raise Exception(f'Browser "{config["browser"]}" is not supported')
    driver.implicitly_wait(config['implicit_wait'])
    yield driver
    # quit the webdriver instance
    driver.quit()
| 365 |
141 | import napari
import numpy as np
# 50^3 volume containing a 10x10x10 cube of ones.
im_data = np.zeros((50, 50, 50))
im_data[30:40, 25:35, 25:35] = 1
viewer = napari.view_image(im_data, colormap='magenta', rendering='iso')
# Second copy of the same volume, shifted 30 voxels along the first axis.
viewer.add_image(im_data, colormap='green', rendering='iso', translate=(30, 0, 0))
points_data = [
    [50, 30, 30],
    [25, 30, 30],
    [75, 30, 30]
]
viewer.add_points(points_data, size=4)
# Switch to 3D rendering before entering the Qt event loop.
viewer.dims.ndisplay = 3
napari.run()
142 | # main.py -- Chapter 4 - Test Harness Example
##################################################################################
# Title : Test Harness Example
# Filename : main.py
# Author : JWB
# Origin Date : 01/07/2019
# Version : 1.0.0
# Copyright : Jacob Beningo
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY BENINGO EMBEDDED GROUP "AS IS" AND ANY EXPRESSED
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL BENINGO EMBEDDED GROUP OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
##################################################################################
import micropython # For emergency exception buffer
from test_harness import TestHarness_Run
# Buffer for interrupt error messages
# (pre-allocated so exceptions raised inside ISRs can still be reported)
micropython.alloc_emergency_exception_buf(100)
############################################################
# Application Constants
############################################################
############################################################
# Application Variables
############################################################
# Setup the MCU and application code to starting conditions
# The blue LED will start on, the yellow LED will be off
def System_Init():
    """Bring the MCU and application code to their starting conditions."""
    for message in ("Initializing system ...", "Starting application ..."):
        print(message)
############################################################
#
# Start script execution ...
#
############################################################
# Initialize the system
System_Init()
print("Starting Tests ...")
TestHarness_Run()
print("Testing Completed")
# Spin forever so the board stays in a known idle state after the tests.
while True:
    i = 0
143 | # mysite/routing.py
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import speaker.routing
application = ProtocolTypeRouter({
    # (http->django views is added by default)
    # WebSocket connections pass through Django's session/auth middleware
    # before being routed to the speaker app's URL patterns.
    'websocket': AuthMiddlewareStack(
        URLRouter(
            speaker.routing.websocket_urlpatterns
        )
    ),
})
144 | '''
Created by auto_sdk on 2015.08.26
'''
from top.api.base import RestApi
class OpenAccountCreateRequest(RestApi):
    """TOP request wrapper for the ``taobao.open.account.create`` API."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request payload; filled in by the caller before the request is sent.
        # TODO confirm the expected format against the TOP SDK docs.
        self.param_list = None

    def getapiname(self):
        return 'taobao.open.account.create'
| 131 |
145 | from typing import List
import time
from typing import Optional

import numpy as np

from qcodes import Instrument
from qcodes.utils import validators
class DummyChannel(Instrument):
    """Dummy channel carrying two settable, range-validated parameters."""

    def __init__(self, name: str, *args, **kwargs):
        super().__init__(name, *args, **kwargs)
        # (name, extra keyword arguments) for each software-backed parameter.
        param_specs = (
            ('ch0', {'vals': validators.Numbers(0, 1),
                     'initial_value': 0}),
            ('ch1', {'unit': 'v',
                     'vals': validators.Numbers(-1, 1),
                     'initial_value': 1}),
        )
        for param_name, extra in param_specs:
            self.add_parameter(param_name, set_cmd=None, **extra)
class DummyInstrumentWithSubmodule(Instrument):
    """A dummy instrument with submodules"""

    def __init__(self, name: str, *args, **kwargs):
        super().__init__(name, *args, **kwargs)
        self.add_parameter('param0',
                           set_cmd=None,
                           vals=validators.Numbers(0, 1),
                           initial_value=0)
        self.add_parameter('param1', unit='v',
                           set_cmd=None,
                           vals=validators.Numbers(-1, 1),
                           initial_value=1)
        # One DummyChannel submodule per channel name.
        for chan_name in ('A', 'B', 'C'):
            channel = DummyChannel('Chan{}'.format(chan_name))
            self.add_submodule(chan_name, channel)

    def test_func(self, a, b, *args, c: Optional[List[int]] = None, **kwargs):
        """Echo back positional/keyword inputs plus the param0 value.

        BUG FIX: ``c`` previously defaulted to the mutable list ``[10, 11]``,
        which is created once and shared across every call; ``None`` now
        stands in for it, keeping the effective default identical.
        """
        if c is None:
            c = [10, 11]
        return a, b, args[0], c, kwargs['d'], self.param0()
class DummyInstrumentTimeout(Instrument):
    """A dummy instrument to test timeout situations"""

    def __init__(self, name: str, *args, **kwargs):
        super().__init__(name, *args, **kwargs)
        # Fixed value chosen once at construction; both getters return it.
        self.random = np.random.randint(10000)

    def get_random(self) -> int:
        # Returns immediately; baseline for the timeout comparison.
        return self.random

    def get_random_timeout(self) -> int:
        # Deliberately sleeps long enough to exceed typical client timeouts.
        time.sleep(10)
        return self.random
| 952 |
146 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-10 18:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: tightens the blueprint FK to PROTECT and
    # resets the timestamp/trained_to_date field defaults to None.

    dependencies = [
        ('IoT_MaintOps', '0009_auto_20180410_1041'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blueprint',
            name='equipment_unique_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='blueprints', related_query_name='blueprint', to='IoT_DataMgmt.EquipmentUniqueType'),
        ),
        migrations.AlterField(
            model_name='blueprint',
            name='timestamp',
            field=models.DateTimeField(default=None),
        ),
        migrations.AlterField(
            model_name='blueprint',
            name='trained_to_date',
            field=models.DateField(default=None),
        ),
    ]
| 439 |
147 | """
Elabore um programa que calcule o valor a ser pago por um produto,
considerando o seu preço normal e condição de pagamento:
À vista dinheiro/cheque: 10% de desconto
Á vista no cartão: 5% de desconto
Em até 2x no cartão: preço normal
3x ou mais no cartão: 20% de juros
É só isso
Não tem mais jeito
Acabou
Boa sorte
Não tenho o que dizer
São só palavras
E o que eu sinto
Não mudará
Boa Sorte - Vanessa da Mata, Ben Harper ♪♫
"""
t = ''
preco = float(input('Informe o preço do produto: '))
print('Escolha a condição de pagamento: ')
print('[ 1 ] À vista dinheiro/cheque, 10% de desconto.')
print('[ 2 ] À vista no cartão, 5% de desconto.')
print('[ 3 ] Em até 2x no cartão, preço normal.')
print('[ 4 ] 3x ou mais no cartão, 20% de juros.')
cp = int(input('Escolha uma opção de pagamento: '))
if cp == 1:
    v = preco * 0.10
    preco -= v
    t = ' desconto '
elif cp == 2:
    v = preco * 0.05
    preco -= v
    t = ' desconto '
elif cp == 3:
    v = 0
    t = 'nada'
elif cp == 4:
    v = preco * 0.20
    preco += v
    t = ' aumento '
else:
    # BUG FIX: an invalid option used to fall through with `v` unbound,
    # crashing with NameError at the summary print below.
    v = 0
    t = 'nada'
    print('Opção inválida! Considerando o preço normal.')
print('O valor total a ser pago é de R$ {:.2f}'.format(preco))
if t.strip() != 'nada':
    print('Houve um{}de R$ {}'.format(t, v))
| 574 |
148 | """This module defines the `picharsso draw gradient` command.
Refer to https://kelvindecosta.github.io/picharsso/commands/draw/gradient/.
"""
import click
from ...draw import new_drawer
from ...draw.gradient import DEFAULT_CHARSET
@click.command("gradient", options_metavar="[options]")
@click.option(
"-s",
"--charset",
type=str,
help="Character set ordered by increasing 'brightness'.",
default=DEFAULT_CHARSET,
show_default=True,
)
@click.option(
"-n", "--negative", is_flag=True, help="Whether to invert output text brightness."
)
@click.pass_context
def draw_gradient(context, charset, negative):
"""Use the gradient style."""
image = context.obj.pop("image")
drawer = new_drawer("gradient", charset=charset, negative=negative, **context.obj)
print(drawer(image))
| 284 |
149 | """A WebSocket handler for Treadmill state.
"""
import os
import logging
import yaml
from treadmill import schema
_LOGGER = logging.getLogger(__name__)
class IdentityGroupAPI(object):
    """Handler for /identity-groups topic.

    Exposes ``subscribe`` (builds watch filters from a message) and
    ``on_event`` (turns a file event into a topic message) as attributes.
    """

    def __init__(self):
        """init"""
        @schema.schema({'$ref': 'websocket/identity_group.json#/message'})
        def subscribe(message):
            """Return filter based on message payload."""
            identity_group = message.get('identity-group', '*')
            return [(os.path.join('/identity-groups', identity_group), '*')]

        def on_event(filename, operation, content):
            """Event handler."""
            if not filename.startswith('/identity-groups/'):
                return
            # A None operation marks state-of-world (initial sync) records.
            sow = operation is None
            full_identity = filename[len('/identity-groups/'):]
            identity_group, identity = full_identity.rsplit('/', 1)
            message = {
                'topic': '/identity-groups',
                'identity-group': identity_group,
                'identity': int(identity),
                'app': None,
                'host': None,
                'sow': sow
            }
            if content:
                # BUG FIX: yaml.load without an explicit Loader is deprecated
                # and can construct arbitrary Python objects; the content here
                # is plain data, so safe_load is sufficient and safe.
                message.update(yaml.safe_load(content))
            return message

        self.subscribe = subscribe
        self.on_event = on_event
def init():
    """API module init."""
    # Route tuple: (topic, handler instance, extra sub-resources).
    return [('/identity-groups', IdentityGroupAPI(), [])]
| 672 |
150 | #!/usr/bin/env python
import os,sys

spec = 'VODUpload.podspec'

# BUG FIX: sys.argv always contains the script name, so the old
# `len(sys.argv) == 0` check could never fire and a missing version
# argument crashed later with an IndexError. Exit early instead.
if len(sys.argv) < 2:
    print('please input version')
    sys.exit(1)

version = sys.argv[1]

# Rewrite the `s.version` line of the podspec in place.
# (The old code also called `f.close` / `wf.close` without parentheses,
# which is a no-op attribute access; `with` handles closing.)
with open(spec) as f:
    lines = f.readlines()
for i in range(len(lines)):
    if lines[i].find('s.version =') != -1:
        lines[i] = " s.version = \"" + version + "\"" + "\n"
with open(spec,'w+') as wf:
    wf.writelines(lines)

# Tag, push, and publish the new version.
tag = "git tag -a "+version+" -m '"+version+"'"
trunk = 'pod trunk push ' + spec + ' --verbose' + ' --allow-warnings'
os.system('git add .')
os.system('git commit -m release')
os.system('git push')
os.system(tag)
os.system('git push --tags')
os.system(trunk)
| 405 |
# Read integers until the user answers 'N' to the continue prompt.
valores = []
while True:
    valores.append(int(input("Digite um valor: ").strip()))
    # Re-prompt until a valid S/N answer arrives.
    while True:
        resposta = str(input("\nDeseja continuar? "
                             "Digite [S]im ou [N]ão.\nSua resposta: ").strip().upper()[0])
        if resposta != 'S' and resposta != 'N':
            print("\nResposta inválida! Tente novamente!")
        elif resposta == 'S':
            print("-="*30)
            break
        elif resposta == 'N':
            print("\n", "-="*30)
            break
    if resposta == 'N':
        break
valores.sort(reverse=True)
print(f"Ao todo, foram digitados {len(valores)} números.\nEm ordem descrescente, são {valores}.")
if 5 in valores:
    # Report every index at which 5 appears (the list is sorted descending).
    print(f"O valor '5' foi encontrado na(s) posição(ões):...", end="")
    for i in range(len(valores)):
        if valores[i] == 5:
            print(f"{i}...", end="")
    print("\nAté a próxima...")
else:
    print("O valor '5' não foi encontrado na lista.")
    print("Até a próxima...")
| 507 |
152 | from xml.dom import minidom
from xml.sax.saxutils import unescape
import os
import click
@click.command()
@click.argument('source')
@click.argument('destination')
@click.option('--blend_func', '-b', required=False, type=str, default='mean',
              help="Mathematical function to be used for blending overlapping pixels in VRT. "
                   "Valid options are: min, mean, max. Default: mean")
def main(source, destination, blend_func='mean'):
    """Rewrite a VRT so overlapping pixels are blended by a Python pixel function."""
    # check the input blend_func
    if blend_func not in ('min', 'max', 'mean'):
        raise ValueError("blend_func must be one of: min, max, mean")
    # make sure the output file doesn't already exist
    if os.path.exists(destination):
        raise FileExistsError("Destination file already exists, cannot overwrite.")
    # make sure the input exists
    if not os.path.exists(source):
        # BUG FIX: a missing input is FileNotFoundError, not FileExistsError.
        raise FileNotFoundError("Source file does not exist.")
    # open the vrt
    xmldoc = minidom.parse(source)
    # get the VRTRasterBand Element
    vrt_raster_band = xmldoc.getElementsByTagName('VRTRasterBand')[0]
    # add the subclass attribute tag
    vrt_raster_band.setAttribute('subClass', "VRTDerivedRasterBand")
    # get the no_data value
    no_data_value = xmldoc.getElementsByTagName('NoDataValue')[0].childNodes[0].nodeValue
    # add the pixelfunction tags
    new_elements = {'PixelFunctionType': 'blend',
                    'PixelFunctionLanguage': 'Python',
                    'PixelFunctionCode': '''<![CDATA[
import numpy as np
def blend(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,
          raster_ysize, buf_radius, gt, **kwargs):
    no_data = np.array({no_data_value}, out_ar.dtype).tolist()
    in_ar_masked = np.ma.masked_array([out_ar] * len(in_ar), mask=False)
    for i, a in enumerate(in_ar):
        in_ar_masked[i, :, :] = a
        in_ar_masked.mask[i, :, :] = (a == no_data)
    out_ar[:] = in_ar_masked.{blend_func}(axis = 0).filled(no_data)
]]>'''.format(no_data_value=no_data_value,
              blend_func=blend_func)}
    for k, v in new_elements.items():
        new_child_element = xmldoc.createElement(k)
        new_child_text = xmldoc.createTextNode(v)
        new_child_element.appendChild(new_child_text)
        vrt_raster_band.appendChild(new_child_element)
    with open(destination, 'w') as o:
        o.write(unescape(xmldoc.toprettyxml()[22:]))


if __name__ == '__main__':
    main()
| 1,013 |
153 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from mindspore.model_zoo.Bert_NEZHA import BertConfig
# Task settings consumed by finetune.py (paths must be filled in by the user).
cfg = edict({
    'task': 'NER',
    'num_labels': 41,
    'data_file': '/your/path/evaluation.tfrecord',
    'schema_file': '/your/path/schema.json',
    'finetune_ckpt': '/your/path/your.ckpt',
    'use_crf': False,
    'clue_benchmark': False,
})
# BERT-base shaped network config; CLUE benchmark evaluates one sample at a time.
bert_net_cfg = BertConfig(
    batch_size=16 if not cfg.clue_benchmark else 1,
    seq_length=128,
    vocab_size=21128,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.0,
    attention_probs_dropout_prob=0.0,
    max_position_embeddings=512,
    type_vocab_size=2,
    initializer_range=0.02,
    use_relative_positions=False,
    input_mask_from_dataset=True,
    token_type_ids_from_dataset=True,
    dtype=mstype.float32,
    compute_type=mstype.float16,
)
| 600 |
154 | #!/usr/bin/env python
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# from __future__ import print_function
import os
import re
import sys

from common import *

# Documentation root: the parent directory of this script's location.
doc_dir = os.path.normpath(
    os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), "../")
)
puml_path = os.path.relpath(sys.argv[1], os.getcwd())
name_no_ext = os.path.splitext(os.path.basename(puml_path))[0]
plantuml_jar = doc_dir + "/diagrams/plantuml/plantuml.jar"
template = load_file(doc_dir + "/scripts/templates/diagram.template")

#
# Extract diagram width
#
max_width = 900
# BUG FIX: the SVG handle was never closed; read the first line via `with`.
with open(doc_dir + "/diagrams/gen/svg/" + name_no_ext + ".svg", "r") as svg:
    first = svg.readline()
pattern = re.compile('viewBox="0 0 ([0-9]+) ([0-9]+)"')
result = re.search(pattern, first)
if not result:
    print("width not found!")
    width = max_width
else:
    width = int(result.group(1))
# Clamp the rendered width to max_width.
if width > max_width:
    scaled_width = max_width
else:
    scaled_width = width

#
# Generate the PNG image from the PUML diagram
#
output_dir = os.path.join(doc_dir + "/html")
# plantuml '-output' option is always relative to the source file,
# so we need to compute the generation directory relative to it!
relative_gen_dir = os.path.relpath(output_dir, os.path.dirname(puml_path))
# Disable diagram generation: assume it was done at a previous stage
# (The Makefile is more efficient and avoids CPU intensive re-generation)
# os.system('java -jar %s -tpng -output %s %s' % \
#    (plantuml_jar, relative_gen_dir, puml_path))

#
# Generate a page that includes the diagram image
#
# html_relative_path = os.path.join('./', name_no_ext+'.png')
html_relative_path = os.path.join("../diagrams/gen/svg", name_no_ext + ".svg")
package = puml_path.replace(os.getcwd(), "").split(os.sep)[0]
data = {
    "package": package,
    "diagram_name": name_no_ext,
    "diagram_path": html_relative_path,
    "width": scaled_width,
}
# Emit the rendered HTML page on stdout.
print((template % data))
| 900 |
155 | # Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import pytest
from ote_sdk.configuration.default_model_parameters import DefaultModelParameters
from ote_sdk.tests.constants.ote_sdk_components import OteSdkComponent
from ote_sdk.tests.constants.requirements import Requirements
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestModelConfiguration:
    """Checks that DefaultModelParameters exposes mutable learning parameters."""

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_model_configuration(self):
        mc = DefaultModelParameters()
        assert hasattr(mc, "learning_parameters")
        # Defaults come from the parameter metadata.
        epoch_default = mc.learning_parameters.get_metadata("epochs")["default_value"]
        batch_size_default = mc.learning_parameters.get_metadata("batch_size")[
            "default_value"
        ]
        assert mc.learning_parameters.epochs == epoch_default
        assert mc.learning_parameters.batch_size == batch_size_default
        # Both parameters must be writable and retain the new values.
        mc.learning_parameters.epochs = epoch_default + 5
        mc.learning_parameters.batch_size = batch_size_default + 4
        assert mc.learning_parameters.batch_size == batch_size_default + 4
        assert mc.learning_parameters.epochs == epoch_default + 5
| 457 |
156 | import importlib
import pytest
from docs_src.tutorial.fastapi.app_testing.tutorial001 import main as app_mod
from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main_002 as test_mod
@pytest.fixture(name="prepare", autouse=True)
def prepare_fixture(clear_sqlmodel):
# Trigger side effects of registering table models in SQLModel
# This has to be called after clear_sqlmodel
importlib.reload(app_mod)
importlib.reload(test_mod)
def test_tutorial():
    """Delegate to the tutorial's own test to verify it passes unchanged."""
    test_mod.test_create_hero()
| 175 |
157 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# see http://docs.python.org/dist/dist.html
#
"""
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@diku.dk>, Rune M. Friborg <runef@diku.dk>.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software. THE
SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from setuptools import setup
import os
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory."""
    # BUG FIX: the original left the file handle open; `with` closes it.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata; long_description is read from README.md at build time.
setup(name='pycsp',
      version='0.9.2',
      description='PyCSP - Python CSP Library',
      long_description=read('README.md'),
      keywords = "python csp concurrency parallel distributed communicating sequential processes",
      author='Rune M. Friborg',
      author_email='rune.m.friborg@gmail.com',
      url='https://github.com/runefriborg/pycsp',
      license='MIT',
      packages=['pycsp', 'pycsp.parallel', 'pycsp.greenlets', 'pycsp.common', 'pycsp.current'],
      platforms=['any'],
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python",
      ],
      )
| 725 |
158 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
class ConfYaml(object):
    """Configuration reader overlaying a named config on the defaults.

    Loads ``imagescraper.default.conf.yaml`` plus
    ``imagescraper.<confname>.conf.yaml``; each getter falls back to the
    default config whenever the named config's value is falsy.
    """

    def __init__(self, confname):
        with open('./etc/imagescraper.default.conf.yaml', 'r') as stream:
            try:
                self.__default_data = yaml.load(stream, Loader=yaml.FullLoader)
            except yaml.YAMLError as yaml_error:
                print(yaml_error)
        with open('./etc/imagescraper.{}.conf.yaml'.format(confname), 'r') as stream:
            try:
                self.__data = yaml.load(stream, Loader=yaml.FullLoader)
            except yaml.YAMLError as yaml_error:
                print(yaml_error)

    def __lookup(self, key):
        # Falsy values in the named config defer to the defaults.
        value = self.__data[key]
        return value if value else self.__default_data[key]

    def get_login_info(self):
        return self.__lookup("login_info")

    def get_page_urls(self):
        return self.__lookup("page_urls")

    def get_image_url_match_pattern(self):
        return self.__lookup("image_url_match_pattern")

    def get_save_dir(self):
        return self.__lookup("save_dir")

    def get_file_rename_pattern(self):
        return self.__lookup("file_rename_pattern")

    def get_file_rename_string(self):
        return self.__lookup("file_rename_string")
| 686 |
159 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
from tvm import te
import logging
import sys, time, subprocess
import json
import os
def schedule(attrs):
    """Bind the output tensor's spatial axes to GPU thread axes.

    NOTE(review): `attrs` appears to be an Antares/TVM scheduling context
    exposing auto_config, scheduler and outputs -- confirm against callers.
    """
    cfg, s, output = attrs.auto_config, attrs.scheduler, attrs.outputs[0]
    # Extents of the spatial and reduction axes of the output op.
    # NOTE(review): rd_vals is computed but never used here.
    th_vals, rd_vals = [attrs.get_extent(x) for x in output.op.axis], [attrs.get_extent(x) for x in output.op.reduce_axis]
    # Let the tuner choose an arbitrary permutation of the spatial axes.
    cfg.define_reorder("reorder_axis", [i for i in range(len(output.op.axis))], "all")
    perm = cfg["reorder_axis"].perm
    vthreads = []
    for i in range(len(th_vals)):
        if i < 3:
            # First three permuted axes become block dimensions.
            # NOTE(review): f'blockIdx.{i}' yields 'blockIdx.0' etc.; TVM's
            # canonical names are blockIdx.x/y/z -- confirm the framework
            # remaps these.
            s[output].bind(output.op.axis[perm[i]], te.thread_axis(f'blockIdx.{i}'))
        else:
            # Remaining axes become virtual threads and are kept innermost.
            s[output].bind(output.op.axis[perm[i]], te.thread_axis(f'vthread'))
            vthreads.append(output.op.axis[perm[i]])
    s[output].reorder(*vthreads)
| 335 |
160 | import os
from .base import *
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# File-based SQLite database -- suitable for local development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
| 150 |
161 | import numpy as np
from hmf.cosmology import growth_factor
from astropy.cosmology import Planck13
import pytest
@pytest.fixture(scope="module")
def gf():
    """Numerical growth factor for the Planck13 cosmology."""
    return growth_factor.GrowthFactor(Planck13)
@pytest.fixture(scope="module")
def genf():
    """GenMF approximation of the growth factor, tabulated up to z=10."""
    return growth_factor.GenMFGrowth(Planck13, zmax=10.0)
@pytest.mark.parametrize("z", np.arange(0, 8, 0.5))
def test_gf(z, gf, genf):
    """The two growth-factor implementations agree; tolerance loosens with z."""
    print(gf.growth_factor(z), genf.growth_factor(z))
    assert np.isclose(
        gf.growth_factor(z), genf.growth_factor(z), rtol=1e-2 + z / 500.0,
    )
@pytest.mark.parametrize("z", np.arange(0, 8, 0.5))
def test_gr(z, gf, genf):
    """Growth rates agree between the two implementations."""
    gf.growth_rate(z), genf.growth_rate(z)
    assert np.isclose(gf.growth_rate(z), genf.growth_rate(z), rtol=1e-2 + z / 100.0)
def test_gfunc(gf, genf):
    """Vectorized growth-factor functions agree on a z grid."""
    gf_func = gf.growth_factor_fn(0.0)
    genf_func = genf.growth_factor_fn(0.0)
    print(gf_func(np.linspace(0, 5, 10)), genf_func(np.linspace(0, 5, 10)))
    assert np.allclose(
        gf_func(np.linspace(0, 5, 10)), genf_func(np.linspace(0, 5, 10)), rtol=1e-2
    )
def test_gr_func(gf, genf):
    """Vectorized growth-rate functions agree on a z grid."""
    gr_func = gf.growth_rate_fn(0.0)
    genf_func = genf.growth_rate_fn(0.0)
    print(gr_func(np.linspace(0, 5, 10)), genf_func(np.linspace(0, 5, 10)))
    assert np.allclose(
        gr_func(np.linspace(0, 5, 10)), genf_func(np.linspace(0, 5, 10)), rtol=1e-2
    )
def test_inverse(gf, genf):
    """Inverse growth-factor functions agree (coarser tolerance)."""
    gf_func = gf.growth_factor_fn(0.0, inverse=True)
    genf_func = genf.growth_factor_fn(0.0, inverse=True)
    # NOTE(review): rebinding `gf` shadows the fixture argument; it works,
    # but a distinct name would be clearer.
    gf = np.linspace(0.15, 0.99, 10)
    print(gf_func(gf), genf_func(gf))
    assert np.allclose(gf_func(gf), genf_func(gf), rtol=1e-1)
| 802 |
162 | import hashlib
def get_md5_hash(api_key='00000000-0000-0000-0000-000000000000',
                 routing_id='1464524676',
                 secret='1234567890ABCDEF'):
    """Build, print and return the MD5 routing digest.

    The digest is MD5(api_key + routing_id + secret); the defaults reproduce
    the original hard-coded demo values, so existing zero-argument callers
    are unaffected.

    Returns:
        str: lowercase hex MD5 digest (the original only printed it;
        returning it makes the function usable programmatically).
    """
    routing_key_data = str(api_key) + str(routing_id) + str(secret)
    digest_string = hashlib.md5(routing_key_data.encode('utf-8')).hexdigest()
    print("Digest string " + digest_string)
    return digest_string
if __name__ == '__main__':
    get_md5_hash()
| 166 |
163 | #!/usr/bin/env python3
import configparser
import logging
import os
import pathlib
from daisho.client import daisho_cli
from daisho.helpers import daisho_help
from daisho.server import daisho_db
HOME = os.getenv("HOME")
DAISHO_HOME = HOME + "/.config/daisho/"
CONFIG = DAISHO_HOME + "daisho.conf"
HISTORY = DAISHO_HOME + "history.txt"
LOG_FILE = DAISHO_HOME + "daisho.log"
daisho_logger = logging.getLogger(__name__)
def generate_config():
    """First-run setup: create Daisho's config/history files, write
    daisho.conf, configure logging, then hand off to the interactive shell.
    """
    print("\n\t- Welcome to Daisho -\n")
    print("Initial setup:")
    print("\tCreating Daisho's configurations")
    # Create DAISHO_HOME, CONFIG and HISTORY (LOG_FILE is created by
    # logging.basicConfig below). parents/exist_ok make re-running setup --
    # or a missing ~/.config -- safe; the original bare mkdir() raised
    # FileNotFoundError / FileExistsError in those cases.
    pathlib.Path(DAISHO_HOME).mkdir(parents=True, exist_ok=True)
    pathlib.Path(CONFIG).touch(exist_ok=True)
    pathlib.Path(HISTORY).touch(exist_ok=True)
    # Write Daisho's configuration file.
    conf_parser = configparser.ConfigParser()
    conf_parser.add_section("Global")
    conf_parser.set("Global", "DAISHO_HOME", DAISHO_HOME)
    conf_parser.set("Global", "CONFIG", CONFIG)
    conf_parser.set("Global", "HISTORY", HISTORY)
    conf_parser.set("Global", "LOG_FILE", LOG_FILE)
    with open(CONFIG, "w") as config_file:
        conf_parser.write(config_file)
    print("\tDone")
    # Configure logging from here.
    logging.basicConfig(filename=LOG_FILE, level=logging.INFO)
    logging.info("Generating configuration files.")
    logging.info("#### Daisho starting up ####")
    # Check if we are able to connect to MongoDB.
    daisho_db.mongo_conn()
    daisho_help.usage()
    daisho_cli.shell()
    logging.info("Started Daisho prompt.")
| 622 |
164 | #!/usr/bin/env python
import sys
import os
from ccg import *
# NOTE(review): Python 2 syntax (`print` statement below) -- this script
# will not run under Python 3 without modification.
TRANSFORM = sys.argv[1]  # name of a transform defined in ccg.trans
transform = trans.__dict__[TRANSFORM]
FILTER = ''
if len(sys.argv) == 3:
    FILTER = sys.argv[2]
    if FILTER in ['dev', 'train', 'test']:
        # Map the split name to the corresponding predicate in ccg.bank.
        FILTER = bank.__dict__[FILTER]
# Apply the transform to every derivation in the bank and print supertags.
for deriv in bank.visit(transform, bank.iter('../data/CCGbank1.2', FILTER)):
    print deriv.stags()
| 149 |
165 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Vidar Tonaas Fauske
# Distributed under the terms of the Modified BSD License.
def _jupyter_nbextension_paths():
return [{
'section': 'notebook',
'src': 'nbextension/static',
'dest': 'jupyter-combobox',
'require': 'jupyter-combobox/extension'
}]
| 155 |
166 | # -------------------------------------------------------------------
# Copyright 2021 Virtex authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# -------------------------------------------------------------------
raise NotImplementedError
| 170 |
167 | from leia import SentimentIntensityAnalyzer
import re
class AcessoPLN(object):
    """Sentiment-analysis helpers built on LeIA (Portuguese VADER)."""

    def __init__(self):
        self.leia = SentimentIntensityAnalyzer()

    def sentiment_analysis(self, text):
        """Score *text* with LeIA and classify it.

        Returns a dict with pos/neg/neu percentages, the compound
        coefficient, and `sentiment_result`: True (compound >= 0.05),
        None (-0.05 < compound < 0.05) or False (compound <= -0.05).
        """
        leia_score = self.leia.polarity_scores(text)
        if leia_score['compound'] >= 0.05:
            result = True
        elif leia_score['compound'] > -0.05:
            result = None
        else:
            result = False
        sentiment_score = {
            'positive_percent': leia_score['pos'],
            'negative_percent': leia_score['neg'],
            'neutral_percent': leia_score['neu'],
            'sentiment_coeficient': leia_score['compound'],
            'sentiment_result': result
        }
        print('A classificação de sentimento do texto "%s" resultou nos coeficientes %s' % (text, sentiment_score))
        return sentiment_score

    def treat_stopwords(self, text):
        """Return *text* with stopwords removed (tokenized on non-word chars)."""
        stopword_list = self.read_txt_list('./lexicons/stopwords.txt')
        # Raw string: the original "[^\w]" relied on an invalid escape
        # sequence (DeprecationWarning; SyntaxError in future Pythons).
        word_list = re.sub(r"[^\w]", " ", text).split()
        treated_word_list = [word for word in word_list
                             if word.lower() not in stopword_list]
        # (fixes the original's `teated_text` typo)
        treated_text = ' '.join(treated_word_list)
        return treated_text

    @staticmethod
    def read_txt_list(file_name, keep_void=False):
        """Read *file_name* into a list of stripped lines.

        Blank lines are dropped unless keep_void=True; BOM markers are
        stripped.
        """
        final_list = []
        with open(file_name, 'r') as file:
            txt_list = file.readlines()
        for line in txt_list:
            line = line.strip().replace('\n', '').replace('\ufeff', '')
            if line != '' or keep_void:
                final_list.append(line)
        return final_list
| 779 |
168 | from django_tex.environment import environment
def latex_safe(value):
    """
    Filter that replaces LaTeX-forbidden characters with escaped equivalents.

    Handles: underscore, dollar, ampersand, hash, and curly braces. The value
    is coerced to str first. A single str.translate pass replaces the
    original chain of .replace() calls, whose non-raw string literals were
    invalid escape sequences (DeprecationWarning).
    """
    table = str.maketrans({
        '_': r'\_',
        '$': r'\$',
        '&': r'\&',
        '#': r'\#',
        '{': r'\{',
        '}': r'\}',
    })
    return str(value).translate(table)
def my_environment(**options):
    """django-tex environment factory: the stock environment plus the
    `latex_safe` filter (point the TEX settings' environment callable here).
    """
    env = environment(**options)
    env.filters.update({
        'latex_safe': latex_safe
    })
    return env
169 | import logging
# Root-logger setup: duplicate every record to debug.log and to stderr.
# NOTE: runs at import time and is a no-op if another module configured
# logging first.
logging.basicConfig(level=logging.INFO,
                    format=u'%(filename)s [LINE:%(lineno)d] #%(levelname)-8s [%(asctime)s] %(message)s',
                    handlers=[
                        logging.FileHandler("debug.log"),
                        logging.StreamHandler()
                    ]
                    )
| 219 |
170 | # Title: 줄 세우기
# Link: https://www.acmicpc.net/problem/2252
import heapq
import queue
import sys
from collections import deque
sys.setrecursionlimit(10 ** 6)
def read_list_int():
    """Read one stdin line as a list of space-separated ints."""
    return list(map(int, sys.stdin.readline().strip().split(' ')))
def read_single_int():
    """Read one stdin line as a single int."""
    return int(sys.stdin.readline().strip())
def get_line(students, indegrees, N):
    """Topological order of students 1..N (BOJ 2252, Kahn's algorithm).

    students[a] lists the students that must stand behind a; indegrees[b]
    counts b's unprocessed predecessors and is consumed (mutated) in place.
    Returns the ordering as a space-separated string.
    """
    line = []
    # deque gives O(1) FIFO operations without queue.Queue's needless
    # locking (this is single-threaded); dead commented-out heapq
    # experiments removed.
    qu = deque(number for number in range(1, N + 1) if indegrees[number] == 0)
    while qu:
        zero_indegree = qu.popleft()
        line.append(zero_indegree)
        for number in students[zero_indegree]:
            indegrees[number] -= 1
            if indegrees[number] == 0:
                qu.append(number)
    return ' '.join(map(str, line))
if __name__ == '__main__':
    # Input: N students, then M ordering constraints "a b" (a before b).
    N, M = read_list_int()
    students = [[] for _ in range(N+1)]   # adjacency list, 1-indexed
    indegrees = [0 for _ in range(N+1)]   # predecessor count per student
    for _ in range(M):
        a, b = read_list_int()
        students[a].append(b)
        indegrees[b] += 1
    print(get_line(students, indegrees, N))
171 | # Generated by Django 3.0.12 on 2021-03-01 21:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('countries', '0018_auto_20210301_1447'),
]
operations = [
migrations.CreateModel(
name='BorderCountry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('border', models.PositiveIntegerField(help_text='в метрах', verbose_name='Протяженность границы')),
('border_country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='neighbour', to='countries.Country')),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='countries.Country')),
],
),
migrations.AddField(
model_name='country',
name='border_countries',
field=models.ManyToManyField(related_name='_country_border_countries_+', through='countries.BorderCountry', to='countries.Country', verbose_name='Соседние страны'),
),
]
| 536 |
172 | from .AbstractReducer import AbstractReducer
import numpy as np
import warnings
from lpproj import LocalityPreservingProjection
class LPP(AbstractReducer):
    """Locality Preserving Projection (lpproj) behind the AbstractReducer API."""
    def __init__(self, d: int = 2, random_state: int = 0, **kwargs):
        """d: target dimensionality; random_state accepted for API parity only.

        NOTE(review): extra **kwargs -- including the n_neighbors advertised
        by get_parameter_ranges() -- are accepted but NOT forwarded to
        LocalityPreservingProjection; confirm whether that is intended.
        """
        super().__init__(d, random_state)
        warnings.warn("Setting random seed does not affect LPP.", UserWarning)
        self._main = LocalityPreservingProjection(n_components = d)
    def fit_transform(self, x: np.ndarray, **kwargs) -> np.ndarray:
        """Fit on x and return the projected data."""
        return self._main.fit_transform(x)
    def fit(self, x: np.ndarray, **kwargs):
        """Fit the projection on x."""
        return self._main.fit(x)
    def transform(self, x: np.ndarray, **kwargs) -> np.ndarray:
        """Project x with the previously fitted transform."""
        return self._main.transform(x)
    def set_random_state(self, random_state: int = 0):
        # Deliberate no-op: LPP is deterministic; warn to make that explicit.
        warnings.warn("Setting random seed does not affect LPP.", UserWarning)
    @property
    def is_deterministic(self) -> bool:
        # Same input always yields the same projection.
        return True
    @property
    def is_stateful(self) -> bool:
        # fit() retains the learned projection for later transform() calls.
        return True
    @staticmethod
    def get_parameter_ranges() -> dict:
        """Tunable hyper-parameter search ranges (type, min, max)."""
        return {
            'n_neighbors': (int, 2, 300)
        }
| 444 |
173 | # python std modules
# third party modules
from setuptools import find_packages
from setuptools import setup
long_description = """small modules and tools useful for many projects
"""
setup(name="mytb",
version="0.1.1",
description="my toolbox for everyday python projects",
long_description=long_description,
long_description_content_type="text/x-rst",
classifiers=[
"Development Status :: 3 - Alpha",
],
keywords="toolbox development",
url="https://github.com/feenes",
author="Teledomic",
author_email="info@teledomic.eu",
license="MIT",
packages=find_packages(),
scripts=[],
entry_points={
"console_scripts": [
"mytb = mytb.commands:main",
]
},
project_urls={
"Homepage": "https://github.com/feenes/mytb",
"Documentation": "https://github.com/feenes/mytb",
"Source": "https://github.com/feenes/mytb",
"SayThanks": "https://github.com/feenes",
"Funding": "https://donate.pypi.org",
"Tracker": "https://github.com/feenes/mytb/issues",
},
install_requires=[
"minibelt",
"unidecode",
],
extras_require=dict(
minimal=[],
all=[
"dateutils",
"ddt",
"pytz",
"pyyaml",
"tzlocal",
"unidecode",
],
date=[
"dateutils",
"pytz",
"tzlocal",
],
gitlab=[
"ddt",
"pyyaml",
],
),
python_requires=">=3.6, <4",
setup_requires=["pytest-runner"],
tests_require=["pytest", "pytest-tempdir"],
zip_safe=False,
include_package_data=True,
)
| 900 |
174 | """
OBJ :
1 - Tipos de Números
2 - Funções e Operações
3 - Aritmética
4 - Operadores
-------------------------------------
1) tipos:
O python possui Vários tipos os Mais comuns são:
- int --> Números Interios, positivos ou Negativos: 1,2,-7
- float --> Números Francionários, positivos ou negativos: -7.5 , 8.4
2) função:
type() - Para descobir qual é o tipo
int() ou float() para converter números
3) Operadores :
Com Números:
--------------
+ -> soma --> 2+2 = 4
- -> Subtrai --> 4-1 = 3
* - > Multiplica --> 2*2 = 4
/ Divide --> 40/2 = 20
** --> Potência --> 2**2 = 4
    % --> Módulo (resto da divisão) --> 5 % 2 = 1
int() - Converte para inteiro --> int(3.2) = 3
float() Converte para float --> float(4) = 4.0
Relacionais
-------------
== (Igualdade, equivalência)
!= (Desigualdade)
> (Maior Que)
< (Menor Que)
>= (Maior que ou Igual a)
<= (Menor que ou Igual a)
""" | 388 |
175 | import pytest
from . import db
from .db import database
from tagtrain import data
def test_unknown_owner(database):
    """Removing a group under a nonexistent owner raises Group.DoesNotExist."""
    with pytest.raises(data.Group.DoesNotExist):
        data.by_owner.remove_group('non-existent', db.GROUP_NAME)
def test_unknown_group(database):
    """Removing a nonexistent group for a valid owner raises Group.DoesNotExist."""
    with pytest.raises(data.Group.DoesNotExist):
        data.by_owner.remove_group(db.OWNER_NAME, 'non-existent')
def test_good(database):
    """A group that exists can be removed and is no longer findable."""
    group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
    data.by_owner.remove_group(db.OWNER_NAME, db.GROUP_NAME)
    with pytest.raises(data.Group.DoesNotExist):
        group = data.by_owner.find_group(db.OWNER_NAME, db.GROUP_NAME)
| 260 |
176 | from .base import ErConnector, DataHelper
from .company import Company
class Contact(DataHelper):
    """Wrapper for an eRecruit Contact record.

    Constructed either from a remote fetch (data=None) or pre-populated from
    an existing response payload to avoid an extra round trip.
    """
    def __init__(self, contact_id, data=None):
        self.contact_id = contact_id
        if not data:
            # Fetch from remote
            self.refresh()
        else:
            # Allows it to be populated by list_communication_methods without an additional fetch
            self.data = data
            # self.refresh(fetch=False)
            self.populate_from_data()
    def refresh(self):
        """Re-fetch the record from the API and re-mirror its fields."""
        self.data = get_contact_by_id(self.contact_id).data
        self.populate_from_data()
    def populate_from_data(self):
        """Mirror commonly used fields of self.data onto instance attributes."""
        self.first_name = self.data.get('First', None)
        self.last_name = self.data.get('Last', None)
        # NOTE(review): this attribute assignment shadows the company_id()
        # method below on every populated instance, so the method is
        # unreachable -- calling inst.company_id() raises TypeError.
        self.company_id = self.data.get('CompanyID', None)
        self.title = self.data.get('Title', None)
        # Triggers a Company fetch via company().
        self.company_name = self.company().name
    def company_id(self):
        # Dead code in practice (shadowed by the attribute above).
        return self.get_field('CompanyID')
    def company(self):
        """Return the Company object for this contact's CompanyID."""
        return Company(self.get_field('CompanyID'))
def get_contact_by_id(id):
    """Fetch a single Contact from the eRecruit 2.0 API by its ID."""
    connector = ErConnector()  # 2.0 API
    response = connector.send_request(
        path='Contact/{id}'.format(id=id),
        verb='GET',
    )
    return Contact(response['ID'], data=response)
177 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-07 05:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_userprofile'),
]
operations = [
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=80)),
],
),
migrations.CreateModel(
name='Security',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.RemoveField(
model_name='userprofile',
name='user',
),
migrations.CreateModel(
name='Company',
fields=[
('place', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='accounts.Place')),
('serves_hardware_solutions', models.BooleanField(default=False)),
('serves_network_solutions', models.BooleanField(default=False)),
],
),
migrations.DeleteModel(
name='UserProfile',
),
migrations.AddField(
model_name='security',
name='company',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Company'),
),
]
| 844 |
178 | from .backend_qt import (
backend_version, SPECIAL_KEYS,
# Public API
cursord, _create_qApp, _BackendQT, TimerQT, MainWindow, FigureCanvasQT,
FigureManagerQT, ToolbarQt, NavigationToolbar2QT, SubplotToolQt,
SaveFigureQt, ConfigureSubplotsQt, SetCursorQt, RubberbandQt,
HelpQt, ToolCopyToClipboardQT,
# internal re-exports
FigureCanvasBase, FigureManagerBase, MouseButton, NavigationToolbar2,
TimerBase, ToolContainerBase, figureoptions, Gcf
)
@_BackendQT.export
class _BackendQT5(_BackendQT):
    """Qt5 backend entry point: identical to the shared Qt backend, exported
    under the Qt5 name for backward compatibility."""
    pass
| 231 |
179 | # Generated by Django 3.0.7 on 2020-06-17 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 690 |
180 | """A handler designed for use with Ignite which stores the value of some object,
possibly after applying some transformation"""
import traceback
import torch
from ignite.engine import Engine
class ObjectLogger(object):
    """Ignite handler that retrieves an object from the engine on every call
    and accumulates the results, optionally clearing on a registered event."""

    def __init__(self, engine, retrieve_fn, clear_event=None):
        """Set up the logger.

        :param engine: an instance of ignite.engine.Engine
        :param retrieve_fn: callable applied to the engine to grab the value
        :param clear_event: optional ignite event that empties the value list
        """
        if not isinstance(engine, Engine):
            raise TypeError("Argument engine should be an Engine")
        self.engine = engine
        self.retrieve_fn = retrieve_fn
        self.values = []
        if clear_event is None:
            return
        self.engine.add_event_handler(clear_event, self.clear)

    def __call__(self, engine):
        """Retrieve the watched value and append it; log failures loudly."""
        try:
            value = self.retrieve_fn(engine)
        except Exception:
            print(
                "Unable to retrieve object from the state using %s"
                % self.retrieve_fn.__name__
            )
            traceback.print_exc()
        else:
            self.values.append(value)

    def clear(self):
        """Drop all stored values."""
        self.values = []
| 513 |
181 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-10 14:05
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
migrator = RawSQLMigration(('custom', 'icds_reports', 'migrations', 'sql_templates', 'database_views'))
class Migration(migrations.Migration):
    # No-op migration: preserves the dependency chain on 0060 but applies
    # no schema operations.
    dependencies = [
        ('icds_reports', '0060_disha_indicators'),
    ]
    operations = []
| 173 |
def userContextToApplicationContext(valueToBeChanged: str, incomingDir: str = ''):
    """Rotate a user-relative direction into the application's frame.

    Given the incoming travel direction ('right', 'left', 'up', 'down'),
    translate a user-frame command ('straight', 'right', 'left', 'back')
    into an absolute direction. Unknown commands or an unknown incoming
    direction return *valueToBeChanged* unchanged, exactly as before.

    The original annotated valueToBeChanged with the string literal ''
    (not a type) and expressed the mapping as a 4x4 if/else ladder; a
    lookup table is equivalent and auditable at a glance.
    """
    rotations = {
        'right': {'straight': 'right', 'right': 'down', 'left': 'up', 'back': 'left'},
        'left': {'straight': 'left', 'right': 'up', 'left': 'down', 'back': 'right'},
        'up': {'straight': 'up', 'right': 'right', 'left': 'left', 'back': 'down'},
        'down': {'straight': 'down', 'right': 'left', 'left': 'right', 'back': 'up'},
    }
    return rotations.get(incomingDir, {}).get(valueToBeChanged, valueToBeChanged)
183 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestBaseSampler(unittest.TestCase):
    """Contract test for the abstract Sampler base class."""
    def test_should_sample_abstract(self):
        """Sampler.should_sample must raise NotImplementedError on the base."""
        from opencensus.trace import samplers
        sampler = samplers.Sampler()
        mock_context = mock.Mock()
        mock_context.trace_id = 'fake_id'
        with self.assertRaises(NotImplementedError):
            sampler.should_sample(mock_context)
| 311 |
184 | import subprocess
import logging
log = logging.getLogger(__name__)
# Active window functions
def get_pname(id) -> str:
    """Return the command line of process *id* via `ps -o cmd=`.

    NOTE(review): shell=True with string interpolation -- fine for local,
    trusted process ids, but switch to an argument list if *id* can ever
    come from untrusted input.
    """
    p = subprocess.Popen(["ps -o cmd= {}".format(id)], stdout=subprocess.PIPE, shell=True)
    return p.communicate()[0].decode('utf-8').strip()
def get_active_application_name() -> str:
    """WM_CLASS class name of the focused X11 window (xdotool + xprop)."""
    p = subprocess.Popen(['xdotool', 'getwindowfocus'], stdout=subprocess.PIPE)
    output, _ = p.communicate()
    p = subprocess.Popen(['xprop', '-id', output, 'WM_CLASS'], stdout=subprocess.PIPE)
    output, _ = p.communicate()
    # xprop output looks like: WM_CLASS(STRING) = "instance", "Class";
    # keep the last comma-separated token, unquoted.
    name = output.decode('ascii').strip().split(',')[-1].strip().replace('"', '')
    return name
def get_active_window_pid() -> int:
    """PID owning the currently focused window."""
    p = subprocess.Popen(['xdotool', 'getwindowfocus', 'getwindowpid'], stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, _ = p.communicate()
    pid = int(output)
    return pid
def get_active_window_title() -> str:
    """Title of the currently focused window."""
    p = subprocess.Popen(['xdotool', 'getwindowfocus', 'getwindowname'], stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, _ = p.communicate()
    wname = output.decode('utf-8').strip()
    return wname
def get_idle_time_s() -> float:
    """Seconds since last user input (xprintidle reports milliseconds).

    Annotation corrected from int: true division always yields a float.
    """
    p = subprocess.Popen(['xprintidle'], stdout=subprocess.PIPE)
    output, _ = p.communicate()
    return int(output)/1000
185 | import os
import re
import sqlite3
from ast import parse
from configparser import SafeConfigParser
def ZData(FullFile, IniFiles):
    """Load layered game data: INI settings, an optional SQLite database,
    inlined JSON blobs, and evaluated numeric settings.

    Key-name prefixes drive handling: 'j*' values name JSON files whose
    contents replace the value; 'i*'/'d*' values are expressions evaluated
    to int/float and stored back as strings.
    Returns {"CONFIG": ConfigParser-or-None, "DATABASE": cursor-or-None}.

    NOTE(review): FullFile is never used -- confirm whether it can go.
    NOTE(review): SafeConfigParser is deprecated; ConfigParser is the
    modern equivalent.
    """
    # INI Loading
    Config = None
    if len(IniFiles) > 0:
        print("Loading INI Settings")
        Config = SafeConfigParser()
        InisFound = Config.read(IniFiles)
        for Ini in InisFound:
            print(" ", Ini)
    # Database Loading
    DBPath = "GAMEDATA.sqlite"
    if os.path.isfile(DBPath):
        Database = sqlite3.connect(DBPath).cursor()
    else:
        Database = None
    del DBPath
    if Config:
        # JSON Loading
        print("Loading JSON Data")
        for Section in Config:
            for Key, Value in Config.items(Section):
                if Key[0] == "j":
                    JsonFile = Value + ".json"
                    print(" ", JsonFile)
                    with open(JsonFile) as Input:
                        Value = "".join(Input)
                    Config[Section][Key] = Value
        # INI Evaluation
        print("Evaluating INI Settings")
        for Section in Config:
            for Key, Value in Config.items(Section):
                if Key[0] in ["i", "d"]:
                    # SECURITY: eval() executes arbitrary expressions from
                    # the INI files -- only safe while they are trusted.
                    Temp = eval(
                        compile(parse(Value, mode="eval"), "<string>", "eval"))
                    if Key[0] == "i":
                        Temp = int(Temp)
                    Config[Section][Key] = str(Temp)
    # End
    return {"CONFIG": Config, "DATABASE": Database}
| 752 |
186 | import os
from pathlib import Path
import shutil
import subprocess
import sys
import tempfile
import pytest
testdir = Path(__file__).parent
rootdir = testdir.parent
path_16 = testdir.joinpath('utf16.txt')
path_8 = testdir.joinpath('utf8.txt')
@pytest.fixture(scope='function')
def tmp_out():
tempdir = tempfile.mkdtemp()
temp_path = os.path.join(tempdir, 'testfile')
# tf = tempfile.NamedTemporaryFile(delete=False)
# tf.file.close()
# os.chmod(tf.name, mode=-0o777)
yield temp_path
shutil.rmtree(tempdir)
def test_16_to_8(tmp_out):
completed = subprocess.run(
['python',
str(rootdir.joinpath('convencode.py')),
str(path_16),
'utf-16',
tmp_out,
'utf-8',
],
stdout=sys.stdout,
stderr=sys.stderr
)
completed.check_returncode()
with open(path_8, 'rb') as f_expected:
expected = f_expected.read()
with open(tmp_out, 'rb') as f_actual:
actual = f_actual.read()
assert actual == expected
def test_8_to_16(tmp_out):
completed = subprocess.run(
['python',
str(rootdir.joinpath('convencode.py')),
str(path_8),
'utf-8',
tmp_out,
'utf-16',
],
stdout=sys.stdout,
stderr=sys.stderr
)
assert completed.returncode == 0
with open(path_16, 'rb') as f_expected:
expected = f_expected.read()
with open(tmp_out, 'rb') as f_actual:
actual = f_actual.read()
assert actual == expected
| 717 |
187 | # Generated by generate_protobuf.sh.
# Contains all messages in *_pb2_grpc.py in a single module.
from .data_set_messages_pb2_grpc import *
from .data_set_service_pb2_grpc import *
from .entity_messages_pb2_grpc import *
from .entity_service_pb2_grpc import *
from .internal_entity_service_pb2_grpc import *
from .relationship_messages_pb2_grpc import *
from .relationship_service_pb2_grpc import *
from .search_messages_pb2_grpc import *
from .service_pb2_grpc import *
from .signal_messages_pb2_grpc import *
from .signal_service_pb2_grpc import *
from .time_series_messages_pb2_grpc import *
from .time_series_service_pb2_grpc import *
| 234 |
188 | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
from keras.models import load_model
from PIL import Image
import sys
from keras import backend as K
def _predict_digit(model_path, image_path):
    """Load a Keras model, run it on the image at *image_path*, and return
    the arg-max class index.

    Shared implementation for api_call() and get_result(), whose original
    bodies were identical except for hard-coded model/data paths; also
    computes argmax once instead of twice.
    """
    K.clear_session()
    model = load_model(model_path)
    pic = Image.open(image_path)
    arr = np.array(pic)
    # Add batch and channel dims: (H, W) -> (1, H, W, 1).
    x = arr.reshape((1,) + arr.shape + (1,))
    val = model.predict(x)
    prediction = val[0].argmax(axis=0)
    print(prediction)
    return prediction
def api_call(file_name):
    """Predict the digit in data/<file_name> using the vimana model."""
    return _predict_digit('vimana/model.h5', 'data/' + file_name)
def get_result(file_name):
    """Predict the digit in *file_name* using the tendermint model."""
    return _predict_digit('tendermint/model.h5', file_name)
189 | from common.evaluators.classification_evaluator import ClassificationEvaluator
from common.evaluators.relevance_transfer_evaluator import RelevanceTransferEvaluator
class EvaluatorFactory(object):
    """
    Map dataset names to their Evaluator class and build instances on demand.
    """
    evaluator_map = {
        'Reuters': ClassificationEvaluator,
        'AAPD': ClassificationEvaluator,
        'IMDB': ClassificationEvaluator,
        'Yelp2014': ClassificationEvaluator,
        'Robust04': RelevanceTransferEvaluator,
        'Robust05': RelevanceTransferEvaluator,
        'Robust45': RelevanceTransferEvaluator,
        'Personality': ClassificationEvaluator,
        'News': ClassificationEvaluator,
        'News_art': ClassificationEvaluator,
        'Procon': ClassificationEvaluator
    }
    @staticmethod
    def get_evaluator(dataset_cls, model, embedding, data_loader, batch_size, device, keep_results=False):
        """Instantiate the evaluator registered under dataset_cls.NAME."""
        if data_loader is None:
            return None
        if not hasattr(dataset_cls, 'NAME'):
            raise ValueError('Invalid dataset. Dataset should have NAME attribute.')
        if dataset_cls.NAME not in EvaluatorFactory.evaluator_map:
            raise ValueError('{} is not implemented.'.format(dataset_cls))
        evaluator_cls = EvaluatorFactory.evaluator_map[dataset_cls.NAME]
        return evaluator_cls(dataset_cls, model, embedding, data_loader,
                             batch_size, device, keep_results)
| 568 |
190 | # -*- coding: utf-8 -*-
'''
Support for Eix
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
def __virtual__():
    '''
    Only work on Gentoo systems with eix installed
    '''
    # __grains__ / __salt__ are injected by the Salt loader at runtime.
    if __grains__['os'] == 'Gentoo' and salt.utils.which('eix'):
        return 'eix'
    return (False, 'The eix execution module cannot be loaded: either the system is not Gentoo or the eix binary is not in the path.')
def sync():
    '''
    Sync portage/overlay trees and update the eix database
    CLI Example:
    .. code-block:: bash
    salt '*' eix.sync
    '''
    # -q quiet; -C passes args through to emerge (auto-answer "n" to prompts).
    cmd = 'eix-sync -q -C "--ask" -C "n"'
    if 'makeconf.features_contains'in __salt__ and __salt__['makeconf.features_contains']('webrsync-gpg'):
        # GPG sign verify is supported only for "webrsync"
        if salt.utils.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync'
            cmd += ' -W'
        else:
            cmd += ' -w'
        return __salt__['cmd.retcode'](cmd) == 0
    else:
        if __salt__['cmd.retcode'](cmd) == 0:
            return True
        # We fall back to "webrsync" if "rsync" fails for some reason
        if salt.utils.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync'
            cmd += ' -W'
        else:
            cmd += ' -w'
        return __salt__['cmd.retcode'](cmd) == 0
def update():
    '''
    Update the eix database
    CLI Example:
    .. code-block:: bash
    salt '*' eix.update
    '''
    cmd = 'eix-update --quiet'
    return __salt__['cmd.retcode'](cmd) == 0
| 706 |
191 | # Generated by Django 2.2.2 on 2019-07-30 13:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional birthday DateField to User.
    # NOTE(review): the field name 'brithday' is misspelled; renaming it now
    # would require a follow-up migration plus model changes.
    dependencies = [
        ('users', '0005_user_gain_votes'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='brithday',
            field=models.DateField(blank=True, null=True, verbose_name='生日'),
        ),
    ]
| 184 |
192 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note: Any AirflowException raised is expected to cause the TaskInstance
# to be marked in an ERROR state
class AirflowException(Exception):
    """Base class for all Airflow errors (see note above: raising one marks
    the TaskInstance as ERROR)."""
    pass
class AirflowConfigException(AirflowException):
    """Raised for Airflow configuration problems."""
    pass
class AirflowSensorTimeout(AirflowException):
    """Raised when a sensor exceeds its timeout."""
    pass
class AirflowTaskTimeout(AirflowException):
    """Raised when a task exceeds its execution time limit."""
    pass
class AirflowWebServerTimeout(AirflowException):
    """Raised when the web server times out."""
    pass
class AirflowSkipException(AirflowException):
    """Raised to mark a task as skipped rather than failed."""
    pass
class AirflowDagCycleException(AirflowException):
    """Raised when a cycle is detected in a DAG's dependency graph."""
    pass
| 305 |
193 | #
# __init__.py
# crest-python
#
# Copyright (C) 2017 Rue Yokaze
# Distributed under the MIT License.
#
from crest.events._abstract_midi_event import AbstractMidiEvent
from crest.events._channel_pressure_event import ChannelPressureEvent
from crest.events._control_event import ControlEvent
from crest.events._exclusive_event import ExclusiveEvent
from crest.events._key_pressure_event import KeyPressureEvent
from crest.events._midi_event import MidiEvent
from crest.events._note_off_event import NoteOffEvent
from crest.events._note_on_event import NoteOnEvent
from crest.events._pitch_event import PitchEvent
from crest.events._program_event import ProgramEvent
from crest.events import factory
from crest.events import primitive
# Public API: the two helper submodules plus every event class re-exported
# above, listed by their own names so renames propagate automatically.
__all__ = (
    [submodule.__name__.split('.')[-1] for submodule in (factory, primitive)]
    + [event_class.__name__ for event_class in (
        AbstractMidiEvent,
        ChannelPressureEvent,
        ControlEvent,
        ExclusiveEvent,
        KeyPressureEvent,
        MidiEvent,
        NoteOffEvent,
        NoteOnEvent,
        PitchEvent,
        ProgramEvent,
    )]
)
| 363 |
194 | from typing import TYPE_CHECKING
try:
from nextcord.ext import commands
except ModuleNotFoundError:
from disnake.ext import commands
from bot_base.wraps import Meta
if TYPE_CHECKING:
from bot_base import BotBase
class BotContext(commands.Context, Meta):
    """Command context that swaps in bot_base's wrapped message object."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Typed alias of the owning bot; stored for the Meta mixin's use.
        bot: "BotBase" = self.bot
        self._wrapped_bot = bot
        # Replace the raw library message with the bot's wrapped variant.
        self.message = bot.get_wrapped_message(self.message)
| 183 |
195 | """
evaluation_config_batch.py
Author: Olivier Vadiavaloo
Description:
This file implements an EvaluationConfig sub-class. In contrast to EvaluationConfigNormal,
this class keeps track of the maximum scores of batches of games played by the strategy provided
by the synthesizer and returns the average of the maximum scores as the strategy's score.
"""
from src.Evaluation.EvaluationConfig.evaluation_config_parent import *
from statistics import *
class EvaluationConfigBatch(EvaluationConfig):
    """Evaluation config that scores a strategy by the mean of batch maxima.

    Games are grouped into fixed-size batches; the best score within each
    completed batch is recorded, and the strategy's overall score is the
    rounded average of those recorded maxima.
    """

    def set_config_attributes(self, attributes):
        """Read the batch size from *attributes*, then defer to the parent."""
        self.batch_size = attributes[self.batch_size_name]
        assert type(self.batch_size) is int, 'batch_size must be an integer'
        super(EvaluationConfigBatch, self).set_config_attributes(attributes)

    def set_batch_size(self, batch_size):
        """Replace the batch size and hand back the previous value."""
        previous = self.batch_size
        self.batch_size = batch_size
        return previous

    def get_batch_size(self):
        """Return the current batch size."""
        return self.batch_size

    def clean_up(self):
        """Reset per-run state: recorded batch maxima and the scores cursor."""
        self.max_scores = []
        self.last_score_index = 0

    def compute_result(self, scores, games_played):
        """Fold a just-completed batch (if any) into the maxima, then score.

        Returns the rounded mean of all batch maxima so far, or MIN_SCORE
        when no batch has completed yet.
        """
        if not self.config_attributes_set:
            raise Exception(
                'Must set attributes of EvaluationConfigBatch object using set_config_attributes'
            )
        if games_played % self.batch_size == 0:
            # Batch boundary: record the batch's best score, advance cursor.
            self.max_scores.append(max(scores[self.last_score_index:]))
            self.last_score_index = len(scores)
        return round(mean(self.max_scores), 2) if self.max_scores else self.MIN_SCORE

    def check_continue(self, program_current_score, games_played):
        """Return False (resetting the cursor) when evaluation should stop."""
        if games_played == self.total_games:
            self.last_score_index = 0
            return False
        triage_possible = self.triage and len(self.max_scores) > 0
        if triage_possible and self.check_triage_stop(
            program_current_score,
            self.compute_epsilon(games_played),
            self.compute_epsilon(self.total_games)
        ):
            self.last_score_index = 0
            return False
        return True
196 | import cmath
from lark import Lark, InlineTransformer
grammar = Lark(
r"""
start : expr
?expr : sum
?sum : sum "+" mul -> add
| sum "-" mul -> sub
| mul
?mul : mul "*" pow -> mul
| mul "/" pow -> div
| pow
?pow : unary "^" pow -> pow
| unary
?unary : "-" atom -> neg
| "+" atom -> pos
| atom
?atom : INT
| COMPLEX
| NAME -> name
| NAME "(" expr ")" -> func
| "(" expr ")"
INT : ("0".."9")+
COMPLEX : INT "i"
NAME : ("a".."z" | "_" | "A".."Z")+
%ignore " "
"""
)
class CalcTransformer(InlineTransformer):
    """Evaluate a tree parsed by ``grammar`` into a (possibly complex) number.

    NOTE(review): ``InlineTransformer`` is the legacy lark API (modern lark
    uses ``Transformer`` + ``v_args(inline=True)``) — confirm the pinned lark
    version still provides it.
    """
    # Rule aliases in the grammar (-> add, sub, mul, div, pow, neg, pos)
    # dispatch straight to these operator-module functions.
    from operator import add, sub, mul, truediv as div, pow, neg, pos
    # Built-in constants and functions visible to expressions.
    names = {
        "pi": cmath.pi,
        "e": cmath.e,
        "answer": 42,
        "log": cmath.log,
        "sqrt": cmath.sqrt,
    }
    def __init__(self):
        super().__init__()
        # Per-instance environment; only ``assign`` writes it (see below).
        self.env = self.names.copy()
    def INT(self, tk):
        # Terminal callback: INT token -> Python int.
        return int(tk)
    def COMPLEX(self, tk):
        # "<digits>i" -> imaginary literal (strip the trailing 'i').
        return self.INT(tk[:-1]) * 1j
    def name(self, tk):
        # Bare-name lookup. NOTE(review): reads class-level ``names``, not
        # ``self.env``, so assignments would never be visible here.
        try:
            return self.names[tk]
        except KeyError:
            raise ValueError(f'variável inexistente: {tk}')
    def func(self, name, arg):
        # NAME "(" expr ")": resolve the name and apply it when callable.
        fn = self.name(name)
        if callable(fn):
            return fn(arg)
        raise ValueError(f'{fn} não é uma função!')
    def assign(self, name, value):
        # NOTE(review): no grammar rule maps to ``assign`` — dead code today.
        self.env[name] = value
transformer = CalcTransformer()
# exemplos = '40 2 +', '3 2 - 1 -', '2 10 4 * +', '4 3 2 ^ ^'
# BUGFIX: the previous active example "x = 1; x + 1" cannot be parsed — the
# grammar defines neither assignment nor a ';' separator, so grammar.parse()
# raised at runtime. Use the expression examples the grammar does support.
exemplos = "2 * pi", "e^1", "3 + 2i", '3 - 2 - (-1)', '(2 + 10) * 4', '4 ^ 3 ^ 2'
for src in exemplos:
    tree = grammar.parse(src)
    print(src)
    print(tree.pretty())
    # transform() leaves the unaliased ``start`` node as a Tree, so pretty()
    # shows the evaluated result beneath it.
    print(transformer.transform(tree).pretty())
197 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 11:07:47 2019
@author: edoardottt
"""
import sqlite3
from datetime import datetime
def save(missili, bombe, nemici):
    """Persist one finished match and return the all-time best scores.

    Inserts a row (timestamp, missili, bombe, nemici) into the ``match``
    table of result.db, then returns the historical maxima as strings in
    the order the existing callers expect: [max_bombe, max_missili,
    max_nemici].

    Fixes over the previous version: the connection is now closed even when
    a query raises (try/finally); the unused SELECT-by-date query is gone;
    the three MAX() queries are collapsed into one; and the fragile
    string-mangling of ``str(fetchone())`` ("(5,)" -> "5") is replaced by
    ``str(value)``, which yields the same text for ints, floats and None.
    """
    conn = sqlite3.connect('result.db')
    try:
        c = conn.cursor()
        c.execute(
            "INSERT INTO match VALUES(?,?,?,?)",
            (datetime.now(), missili, bombe, nemici),
        )
        conn.commit()
        # One aggregate query instead of three round-trips.
        c.execute("SELECT MAX(missili), MAX(bombe), MAX(nemici) FROM match")
        max_missili, max_bombe, max_nemici = c.fetchone()
    finally:
        conn.close()
    # Historic return order preserved: [bombe max, missili max, nemici max].
    return [str(max_bombe), str(max_missili), str(max_nemici)]
| 523 |
198 | import sys
import os
# Make the directory one level above this file importable so the sibling
# packages imported below (environment, print_board, model) resolve when the
# script is run directly.
abspath = '/'.join(os.path.abspath(__file__).split('/')[:-1]) + '/'  # this file's dir, trailing '/'
under_abspath = '/'.join(os.path.abspath(__file__).split('/')[:-2])  # parent of this file's dir
sys.path.insert(0, under_abspath)
from environment import splendor
from print_board import PrintBoard
from model import Model
from copy import deepcopy
# Self-play training loop: two models alternate turns in the Splendor env.
# NOTE(review): several statements below are dead code (they follow a
# ``break``); flagged inline but deliberately left byte-identical.
env = splendor.Splendor()
model = Model(env.colors)
model2 = Model(env.colors)
for game_number in range(1):  # currently a single training game
    state = deepcopy(env.reset())
    # Decay the exploration rate once per game.
    model.eps *= model.decay_factor
    done = False
    game_round = 0
    while not done:
        empty_moves = 0
        game_round += 1
        # --- first model's turn: act, then learn from the transition ---
        action = model.best_action(state, env)
        new_state = model.step(state, action, env)
        model.update_weights(state, new_state, action, env)
        if action == 6:
            break
            empty_moves += 1  # NOTE(review): unreachable — follows ``break``
        elif action == 5:
            print('bought!')
            break
        '''
        if new_state['return_tokens']:
            action = model.best_action(new_state, env)
            old_state = new_state
            new_state = model.step(new_state, action, env)
            model.update_weights(old_state, new_state, action, env)
            if not new_s['return_tokens']:
                break
        '''
        state = new_state
        if state['return_tokens']:
            break
            # NOTE(review): the rest of this branch is unreachable because of
            # the ``break`` just above.
            for i in range(3):
                state = env.move({'pick': {}})
            continue
        # --- second model's turn (no weight update for model2 here) ---
        action = model2.best_action(state, env)
        new_state = model2.step(state, action, env)
        if action == 6:
            empty_moves += 1
        elif action == 5:
            print('other one bought')
            done = True
        '''
        if new_state['return_tokens']:
            action = model2.best_action(new_state, env)
            new_state = model2.step(new_state, action, env)
            if not new_s['return_tokens']:
                break
        '''
        state = new_state
        #PrintBoard.print_state(new_state, game_round, 0)
        if empty_moves == 4:
            print('breaking!')
            break
    print(env.return_state(False)['players'][0]['tokens'])
    #print(env.return_state(False)['players'][0]['cards'])
    #PrintBoard.print_state(new_state, game_round, 0)
    if not (game_number%100):
        #print(model.eps)
        print('game round: {}'.format(game_number))
        # Dump first-layer weights, rounded, for a quick eyeball of learning.
        for layer in model.model.layers:
            for i in layer.get_weights()[0]:
                print(' '.join(str([round(x, 2) for x in list(i)]).replace(',', ' ').split()))
199 | # Generated by Django 2.2.7 on 2019-11-24 13:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``product_pic`` image field (with a default image) to stock."""
    dependencies = [
        ('shop', '0004_auto_20191123_1734'),
    ]
    operations = [
        migrations.AddField(
            model_name='stock',
            name='product_pic',
            # Default lets existing rows get a placeholder picture.
            field=models.ImageField(default='sahara/sahara-2.png', upload_to='products'),
        ),
    ]
| 190 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.