| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
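Each row pairs repository metadata (hexsha, size, repo paths/names, licenses, star/issue/fork counts) with the raw file `content` and the `qsc_*` quality signals. A minimal sketch of how such rows could be loaded and filtered, assuming the dump is saved locally as a Parquet file; the file name and thresholds below are illustrative, not part of the dump:

# Load the rows and keep Python files that parse as an AST and are not
# dominated by comments, then print where each file came from.
import pandas as pd

df = pd.read_parquet("code_rows.parquet")  # hypothetical local copy of the dump

mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_comments_quality_signal"] < 0.5)
)
for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])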
hexsha: d99b5ab0ec594ac30b1d197b23a5cda7c48151d5
size: 18,065
ext: py
lang: Python
max_stars_repo_path: rasa/train.py
max_stars_repo_name: Amirali-Shirkh/rasa-for-botfront
max_stars_repo_head_hexsha: 36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: rasa/train.py
max_issues_repo_name: Amirali-Shirkh/rasa-for-botfront
max_issues_repo_head_hexsha: 36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: rasa/train.py
max_forks_repo_name: Amirali-Shirkh/rasa-for-botfront
max_forks_repo_head_hexsha: 36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import asyncio
import os
import tempfile
from contextlib import ExitStack
from typing import Text, Optional, List, Union, Dict
from rasa.importers.importer import TrainingDataImporter
from rasa import model
from rasa.model import FingerprintComparisonResult
from rasa.core.domain import Domain
from rasa.utils.common import TempDirectoryPath
from rasa.cli.utils import (
print_success,
print_warning,
print_error,
bcolors,
print_color,
)
from rasa.constants import DEFAULT_MODELS_PATH, DEFAULT_CORE_SUBDIRECTORY_NAME
def train(
domain: Text,
config: Text,
training_files: Union[Text, List[Text]],
output: Text = DEFAULT_MODELS_PATH,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> Optional[Text]:
if loop is None:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(
train_async(
domain=domain,
config=config,
training_files=training_files,
output_path=output,
force_training=force_training,
fixed_model_name=fixed_model_name,
persist_nlu_training_data=persist_nlu_training_data,
additional_arguments=additional_arguments,
)
)
async def train_async(
domain: Union[Domain, Text],
config: Dict[Text, Text],
training_files: Optional[Union[Text, List[Text]]],
output_path: Text = DEFAULT_MODELS_PATH,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
"""Trains a Rasa model (Core and NLU).
Args:
domain: Path to the domain file.
config: Dict of paths to the config for Core and NLU. Keys are language codes
training_files: Paths to the training data for Core and NLU.
output_path: Output path.
force_training: If `True` retrain model even if data has not changed.
fixed_model_name: Name of model to be stored.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
additional_arguments: Additional training parameters.
Returns:
Path of the trained model archive.
"""
# file_importer = TrainingDataImporter.load_from_config(
# config, domain, training_files
# )
with ExitStack() as stack:
train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))
# bf mod
from rasa_addons.importers import BotfrontFileImporter
file_importer = BotfrontFileImporter(config, domain, training_files)
# domain = await file_importer.get_domain()
# if domain.is_empty():
# return await handle_domain_if_not_exists(
# file_importer, output_path, fixed_model_name
# )
# /bf mod
return await _train_async_internal(
file_importer,
train_path,
output_path,
force_training,
fixed_model_name,
persist_nlu_training_data,
additional_arguments,
)
async def handle_domain_if_not_exists(
file_importer: TrainingDataImporter, output_path, fixed_model_name
):
nlu_model_only = await _train_nlu_with_validated_data(
file_importer, output=output_path, fixed_model_name=fixed_model_name
)
print_warning(
"Core training was skipped because no valid domain file was found. Only an nlu-model was created."
"Please specify a valid domain using '--domain' argument or check if the provided domain file exists."
)
return nlu_model_only
async def _train_async_internal(
file_importer: TrainingDataImporter,
train_path: Text,
output_path: Text,
force_training: bool,
fixed_model_name: Optional[Text],
persist_nlu_training_data: bool,
additional_arguments: Optional[Dict],
) -> Optional[Text]:
"""Trains a Rasa model (Core and NLU). Use only from `train_async`.
Args:
file_importer: `TrainingDataImporter` which supplies the training data.
train_path: Directory in which to train the model.
output_path: Output path.
force_training: If `True` retrain model even if data has not changed.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
fixed_model_name: Name of model to be stored.
additional_arguments: Additional training parameters.
Returns:
Path of the trained model archive.
"""
stories, nlu_data = await asyncio.gather(
file_importer.get_stories(), file_importer.get_nlu_data()
)
# if stories.is_empty() and nlu_data.is_empty():
# print_error(
# "No training data given. Please provide stories and NLU data in "
# "order to train a Rasa model using the '--data' argument."
# )
# return
# if nlu_data.is_empty():
# print_warning("No NLU data present. Just a Rasa Core model will be trained.")
# return await _train_core_with_validated_data(
# file_importer,
# output=output_path,
# fixed_model_name=fixed_model_name,
# additional_arguments=additional_arguments,
# )
new_fingerprint = await model.model_fingerprint(file_importer)
old_model = model.get_latest_model(output_path)
fingerprint_comparison = FingerprintComparisonResult(force_training=force_training)
if not force_training:
fingerprint_comparison = model.should_retrain(
new_fingerprint, old_model, train_path
)
# bf mod >
if fingerprint_comparison.nlu == True: # replace True with list of all langs
fingerprint_comparison.nlu = list(new_fingerprint.get("nlu-config", {}).keys())
domain = await file_importer.get_domain()
core_untrainable = domain.is_empty() or stories.is_empty()
nlu_untrainable = [l for l, d in nlu_data.items() if d.is_empty()]
fingerprint_comparison.core = fingerprint_comparison.core and not core_untrainable
fingerprint_comparison.nlu = [l for l in fingerprint_comparison.nlu if l not in nlu_untrainable]
if core_untrainable:
print_color("Skipping Core training since domain or stories are empty.", color=bcolors.OKBLUE)
for lang in nlu_untrainable:
print_color("No NLU data found for language <{}>, skipping training...".format(lang), color=bcolors.OKBLUE)
# </ bf mod
if fingerprint_comparison.is_training_required():
await _do_training(
file_importer,
output_path=output_path,
train_path=train_path,
fingerprint_comparison_result=fingerprint_comparison,
fixed_model_name=fixed_model_name,
persist_nlu_training_data=persist_nlu_training_data,
additional_arguments=additional_arguments,
)
return model.package_model(
fingerprint=new_fingerprint,
output_directory=output_path,
train_path=train_path,
fixed_model_name=fixed_model_name,
)
print_success(
"Nothing changed. You can use the old model stored at '{}'."
"".format(os.path.abspath(old_model))
)
return old_model
async def _do_training(
file_importer: TrainingDataImporter,
output_path: Text,
train_path: Text,
fingerprint_comparison_result: Optional[FingerprintComparisonResult] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
):
if not fingerprint_comparison_result:
fingerprint_comparison_result = FingerprintComparisonResult()
if fingerprint_comparison_result.should_retrain_core():
await _train_core_with_validated_data(
file_importer,
output=output_path,
train_path=train_path,
fixed_model_name=fixed_model_name,
additional_arguments=additional_arguments,
)
elif fingerprint_comparison_result.should_retrain_nlg():
print_color(
"Core stories/configuration did not change. "
"Only the templates section has been changed. A new model with "
"the updated templates will be created.",
color=bcolors.OKBLUE,
)
await model.update_model_with_new_domain(file_importer, train_path)
else:
print_color(
"Core stories/configuration did not change. No need to retrain Core model.",
color=bcolors.OKBLUE,
)
if fingerprint_comparison_result.should_retrain_nlu():
await _train_nlu_with_validated_data(
file_importer,
output=output_path,
train_path=train_path,
fixed_model_name=fixed_model_name,
retrain_nlu=fingerprint_comparison_result.nlu,
persist_nlu_training_data=persist_nlu_training_data,
)
else:
print_color(
"NLU data/configuration did not change. No need to retrain NLU model.",
color=bcolors.OKBLUE,
)
def train_core(
domain: Union[Domain, Text],
config: Text,
stories: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
loop = asyncio.get_event_loop()
return loop.run_until_complete(
train_core_async(
domain=domain,
config=config,
stories=stories,
output=output,
train_path=train_path,
fixed_model_name=fixed_model_name,
additional_arguments=additional_arguments,
)
)
async def train_core_async(
domain: Union[Domain, Text],
config: Text,
stories: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
"""Trains a Core model.
Args:
domain: Path to the domain file.
config: Path to the config file for Core.
stories: Path to the Core training data.
output: Output path.
train_path: If `None` the model will be trained in a temporary
directory, otherwise in the provided directory.
fixed_model_name: Name of model to be stored.
uncompress: If `True` the model will not be compressed.
additional_arguments: Additional training parameters.
Returns:
If `train_path` is given it returns the path to the model archive,
otherwise the path to the directory with the trained model files.
"""
file_importer = TrainingDataImporter.load_core_importer_from_config(
config, domain, [stories]
)
domain = await file_importer.get_domain()
if domain.is_empty():
print_error(
"Core training was skipped because no valid domain file was found. "
"Please specify a valid domain using '--domain' argument or check if the provided domain file exists."
)
return None
if not await file_importer.get_stories():
print_error(
"No stories given. Please provide stories in order to "
"train a Rasa Core model using the '--stories' argument."
)
return
return await _train_core_with_validated_data(
file_importer,
output=output,
train_path=train_path,
fixed_model_name=fixed_model_name,
additional_arguments=additional_arguments,
)
async def _train_core_with_validated_data(
file_importer: TrainingDataImporter,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
"""Train Core with validated training and config data."""
import rasa.core.train
with ExitStack() as stack:
if train_path:
# If the train path was provided, do nothing on exit.
_train_path = train_path
else:
# Otherwise, create a temp train path and clean it up on exit.
_train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))
# normal (not compare) training
print_color("Training Core model...", color=bcolors.OKBLUE)
domain, config = await asyncio.gather(
file_importer.get_domain(), file_importer.get_config()
)
await rasa.core.train(
domain_file=domain,
training_resource=file_importer,
output_path=os.path.join(_train_path, DEFAULT_CORE_SUBDIRECTORY_NAME),
policy_config=config,
additional_arguments=additional_arguments,
)
print_color("Core model training completed.", color=bcolors.OKBLUE)
if train_path is None:
# Only Core was trained.
new_fingerprint = await model.model_fingerprint(file_importer)
return model.package_model(
fingerprint=new_fingerprint,
output_directory=output,
train_path=_train_path,
fixed_model_name=fixed_model_name,
model_prefix="core-",
)
return _train_path
def train_nlu(
config: Text,
nlu_data: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
) -> Optional[Text]:
"""Trains an NLU model.
Args:
config: Path to the config file for NLU.
nlu_data: Path to the NLU training data.
output: Output path.
train_path: If `None` the model will be trained in a temporary
directory, otherwise in the provided directory.
fixed_model_name: Name of the model to be stored.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
Returns:
If `train_path` is given it returns the path to the model archive,
otherwise the path to the directory with the trained model files.
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(
_train_nlu_async(
config,
nlu_data,
output,
train_path,
fixed_model_name,
persist_nlu_training_data,
)
)
async def _train_nlu_async(
config: Text,
nlu_data: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
):
if not nlu_data:
print_error(
"No NLU data given. Please provide NLU data in order to train "
"a Rasa NLU model using the '--nlu' argument."
)
return
# training NLU only hence the training files still have to be selected
file_importer = TrainingDataImporter.load_nlu_importer_from_config(
config, training_data_paths=[nlu_data]
)
training_datas = await file_importer.get_nlu_data()
if training_datas.is_empty():
print_error(
f"Path '{nlu_data}' doesn't contain valid NLU data in it. "
"Please verify the data format. "
"The NLU model training will be skipped now."
)
return
return await _train_nlu_with_validated_data(
file_importer,
output=output,
train_path=train_path,
fixed_model_name=fixed_model_name,
persist_nlu_training_data=persist_nlu_training_data,
)
async def _train_nlu_with_validated_data(
file_importer: TrainingDataImporter,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
retrain_nlu: Union[bool, List[Text]] = True
) -> Optional[Text]:
"""Train NLU with validated training and config data."""
import rasa.nlu.train
with ExitStack() as stack:
models = {}
from rasa.nlu import config as cfg_loader
if train_path:
# If the train path was provided, do nothing on exit.
_train_path = train_path
else:
# Otherwise, create a temp train path and clean it up on exit.
_train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))
# bf mod
config = await file_importer.get_nlu_config(retrain_nlu)
for lang in config:
if config[lang]:
print_color("Start training {} NLU model ...".format(lang), color=bcolors.OKBLUE)
_, models[lang], _ = await rasa.nlu.train(
config[lang],
file_importer,
_train_path,
fixed_model_name="nlu-{}".format(lang),
persist_nlu_training_data=persist_nlu_training_data,
)
else:
print_color("NLU data for language <{}> didn't change, skipping training...".format(lang), color=bcolors.OKBLUE)
# /bf mod
print_color("NLU model training completed.", color=bcolors.OKBLUE)
if train_path is None:
# Only NLU was trained
new_fingerprint = await model.model_fingerprint(file_importer)
return model.package_model(
fingerprint=new_fingerprint,
output_directory=output,
train_path=_train_path,
fixed_model_name=fixed_model_name,
model_prefix="nlu-",
)
return _train_path
avg_line_length: 34.673704 | max_line_length: 128 | alphanum_fraction: 0.654027
qsc_*_quality_signal columns (41 values, in schema order): 2,139 | 18,065 | 5.259467 | 0.099579 | 0.0432 | 0.053511 | 0.043022 | 0.643644 | 0.565956 | 0.532178 | 0.5128 | 0.470756 | 0.448444 | 0 | 0 | 0.275505 | 18,065 | 520 | 129 | 34.740385 | 0.859566 | 0.102131 | 0 | 0.47619 | 0 | 0 | 0.097136 | 0.003 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008403 | false | 0 | 0.123249 | 0 | 0.173669 | 0.120448
qsc_* raw columns (41 values, in schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
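The `train` entry point in the file above is a synchronous wrapper around `train_async`; in this Botfront fork the config is a per-language mapping rather than a single path (see the `train_async` signature and docstring). A hedged usage sketch, with purely hypothetical file paths and language key:

# Hypothetical invocation of the fork's training entry point; all paths are placeholders.
from rasa.train import train

model_archive = train(
    domain="domain.yml",
    config={"en": "config-en.yml"},   # per-language config dict, as train_async expects
    training_files=["data/"],
    output="models/",
    force_training=False,
)
print(model_archive)  # path to the packaged model archive, or None if nothing was trained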
hexsha: d99ed7256245422c7c5dd3c60b0661e4f78183ea
size: 35,585
ext: py
lang: Python
max_stars_repo_path: rplugin/python3/denite/ui/default.py
max_stars_repo_name: timgates42/denite.nvim
max_stars_repo_head_hexsha: 12a9b5456f5a4600afeb0ba284ce1098bd35e501
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: rplugin/python3/denite/ui/default.py
max_issues_repo_name: timgates42/denite.nvim
max_issues_repo_head_hexsha: 12a9b5456f5a4600afeb0ba284ce1098bd35e501
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: rplugin/python3/denite/ui/default.py
max_forks_repo_name: timgates42/denite.nvim
max_forks_repo_head_hexsha: 12a9b5456f5a4600afeb0ba284ce1098bd35e501
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# ============================================================================
# FILE: default.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
@property
def is_async(self) -> bool:
return self._is_async
def __init__(self, vim: Nvim) -> None:
self._vim = vim
self._denite: typing.Optional[SyncParent] = None
self._selected_candidates: typing.List[int] = []
self._candidates: Candidates = []
self._cursor = 0
self._entire_len = 0
self._result: typing.List[typing.Any] = []
self._context: UserContext = {}
self._bufnr = -1
self._winid = -1
self._winrestcmd = ''
self._initialized = False
self._winheight = 0
self._winwidth = 0
self._winminheight = -1
self._is_multi = False
self._is_async = False
self._matched_pattern = ''
self._displayed_texts: typing.List[str] = []
self._statusline_sources = ''
self._titlestring = ''
self._ruler = False
self._prev_action = ''
self._prev_status: typing.Dict[str, typing.Any] = {}
self._prev_curpos: typing.List[typing.Any] = []
self._save_window_options: typing.Dict[str, typing.Any] = {}
self._sources_history: typing.List[typing.Any] = []
self._previous_text = ''
self._floating = False
self._filter_floating = False
self._updated = False
self._timers: typing.Dict[str, int] = {}
self._matched_range_id = -1
self._matched_char_id = -1
self._check_matchdelete = bool(self._vim.call(
'denite#util#check_matchdelete'))
def start(self, sources: typing.List[typing.Any],
context: UserContext) -> typing.List[typing.Any]:
if not self._denite:
# if hasattr(self._vim, 'run_coroutine'):
# self._denite = ASyncParent(self._vim)
# else:
self._denite = SyncParent(self._vim)
self._result = []
context['sources_queue'] = [sources]
self._start_sources_queue(context)
return self._result
def do_action(self, action_name: str,
command: str = '', is_manual: bool = False) -> None:
if is_manual:
candidates = self._get_selected_candidates()
elif self._get_cursor_candidate():
candidates = [self._get_cursor_candidate()]
else:
candidates = []
if not self._denite or not candidates or not action_name:
return
self._prev_action = action_name
action = self._denite.get_action(
self._context, action_name, candidates)
if not action:
return
post_action = self._context['post_action']
is_quit = action['is_quit'] or post_action == 'quit'
if is_quit:
self.quit()
self._denite.do_action(self._context, action_name, candidates)
self._result = candidates
if command != '':
self._vim.command(command)
if is_quit and post_action == 'open':
# Re-open denite buffer
prev_cursor = self._cursor
cursor_candidate = self._get_cursor_candidate()
self._init_buffer()
self.redraw(False)
if cursor_candidate == self._get_candidate(prev_cursor):
# Restore the cursor
self._move_to_pos(prev_cursor)
# Disable quit flag
is_quit = False
if not is_quit and is_manual:
self._selected_candidates = []
self.redraw(action['is_redraw'])
if is_manual and self._context['sources_queue']:
self._context['input'] = ''
self._context['quick_move'] = ''
self._start_sources_queue(self._context)
return
def redraw(self, is_force: bool = True) -> None:
self._context['is_redraw'] = is_force
if is_force:
self._gather_candidates()
if self._update_candidates():
self._update_buffer()
else:
self._update_status()
self._context['is_redraw'] = False
def quit(self) -> None:
if self._denite:
self._denite.on_close(self._context)
self._quit_buffer()
self._result = []
return
def _restart(self) -> None:
self._context['input'] = ''
self._quit_buffer()
self._init_denite()
self._gather_candidates()
self._init_buffer()
self._update_candidates()
self._update_buffer()
def _start_sources_queue(self, context: UserContext) -> None:
if not context['sources_queue']:
return
self._sources_history.append({
'sources': context['sources_queue'][0],
'path': context['path'],
})
self._start(context['sources_queue'][0], context)
if context['sources_queue']:
context['sources_queue'].pop(0)
context['path'] = self._context['path']
def _start(self, sources: typing.List[typing.Any],
context: UserContext) -> None:
from denite.ui.map import do_map
self._vim.command('silent! autocmd! denite')
if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
# Ignore command line window.
return
resume = self._initialized and context['resume']
if resume:
# Skip the initialization
update = ('immediately', 'immediately_1',
'cursor_pos', 'prev_winid',
'start_filter', 'quick_move')
for key in update:
self._context[key] = context[key]
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
if context['refresh']:
self.redraw()
self._move_to_pos(self._cursor)
else:
if self._context != context:
self._context.clear()
self._context.update(context)
self._context['sources'] = sources
self._context['is_redraw'] = False
self._is_multi = len(sources) > 1
if not sources:
# Ignore empty sources.
error(self._vim, 'Empty sources')
return
self._init_denite()
self._gather_candidates()
self._update_candidates()
self._init_cursor()
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
self._update_displayed_texts()
self._update_buffer()
self._move_to_pos(self._cursor)
if self._context['quick_move'] and do_map(self, 'quick_move', []):
return
if self._context['start_filter']:
do_map(self, 'open_filter_buffer', [])
def _init_buffer(self) -> None:
self._prev_status = dict()
self._displayed_texts = []
self._prev_bufnr = self._vim.current.buffer.number
self._prev_curpos = self._vim.call('getcurpos')
self._prev_wininfo = self._get_wininfo()
self._prev_winid = self._context['prev_winid']
self._winrestcmd = self._vim.call('winrestcmd')
self._ruler = self._vim.options['ruler']
self._switch_buffer()
self._bufnr = self._vim.current.buffer.number
self._winid = self._vim.call('win_getid')
self._resize_buffer(True)
self._winheight = self._vim.current.window.height
self._winwidth = self._vim.current.window.width
self._bufvars = self._vim.current.buffer.vars
self._bufvars['denite'] = {
'buffer_name': self._context['buffer_name'],
}
self._bufvars['denite_statusline'] = {}
self._vim.vars['denite#_previewed_buffers'] = {}
self._save_window_options = {}
window_options = {
'colorcolumn',
'concealcursor',
'conceallevel',
'cursorcolumn',
'cursorline',
'foldcolumn',
'foldenable',
'list',
'number',
'relativenumber',
'signcolumn',
'spell',
'winfixheight',
'wrap',
}
for k in window_options:
self._save_window_options[k] = self._vim.current.window.options[k]
# Note: Have to use setlocal instead of "current.window.options"
# "current.window.options" changes global value instead of local in
# neovim.
self._vim.command('setlocal colorcolumn=')
self._vim.command('setlocal conceallevel=3')
self._vim.command('setlocal concealcursor=inv')
self._vim.command('setlocal nocursorcolumn')
self._vim.command('setlocal nofoldenable')
self._vim.command('setlocal foldcolumn=0')
self._vim.command('setlocal nolist')
self._vim.command('setlocal nonumber')
self._vim.command('setlocal norelativenumber')
self._vim.command('setlocal nospell')
self._vim.command('setlocal winfixheight')
self._vim.command('setlocal nowrap')
if self._context['prompt']:
self._vim.command('setlocal signcolumn=yes')
else:
self._vim.command('setlocal signcolumn=auto')
if self._context['cursorline']:
self._vim.command('setlocal cursorline')
options = self._vim.current.buffer.options
if self._floating:
# Disable ruler
self._vim.options['ruler'] = False
options['buftype'] = 'nofile'
options['bufhidden'] = 'delete'
options['swapfile'] = False
options['buflisted'] = False
options['modeline'] = False
options['modifiable'] = False
options['filetype'] = 'denite'
if self._vim.call('exists', '#WinEnter'):
self._vim.command('doautocmd WinEnter')
if self._vim.call('exists', '#BufWinEnter'):
self._vim.command('doautocmd BufWinEnter')
if not self._vim.call('has', 'nvim'):
# In Vim8, FileType autocmd is not fired after set filetype option.
self._vim.command('silent doautocmd FileType denite')
if self._context['auto_action']:
self._vim.command('autocmd denite '
'CursorMoved <buffer> '
'call denite#call_map("auto_action")')
self._init_syntax()
def _switch_buffer(self) -> None:
split = self._context['split']
if (split != 'no' and self._winid > 0 and
self._vim.call('win_gotoid', self._winid)):
if split != 'vertical' and not self._floating:
# Move the window to bottom
self._vim.command('wincmd J')
self._winrestcmd = ''
return
self._floating = split in [
'floating',
'floating_relative_cursor',
'floating_relative_window',
]
self._filter_floating = False
if self._vim.current.buffer.options['filetype'] != 'denite':
self._titlestring = self._vim.options['titlestring']
command = 'edit'
if split == 'tab':
self._vim.command('tabnew')
elif self._floating:
self._split_floating(split)
elif self._context['filter_split_direction'] == 'floating':
self._filter_floating = True
elif split != 'no':
command = self._get_direction()
command += ' vsplit' if split == 'vertical' else ' split'
bufname = '[denite]-' + self._context['buffer_name']
if self._vim.call('exists', '*bufadd'):
bufnr = self._vim.call('bufadd', bufname)
vertical = 'vertical' if split == 'vertical' else ''
command = (
'buffer' if split
in ['no', 'tab', 'floating',
'floating_relative_window',
'floating_relative_cursor'] else 'sbuffer')
self._vim.command(
'silent keepalt %s %s %s %s' % (
self._get_direction(),
vertical,
command,
bufnr,
)
)
else:
self._vim.call(
'denite#util#execute_path',
f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
direction = str(self._context['direction'])
if direction == 'dynamictop' or direction == 'dynamicbottom':
self._update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self) -> typing.List[typing.Any]:
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
]
def _switch_prev_buffer(self) -> None:
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
def _init_syntax(self) -> None:
self._vim.command('syntax case ignore')
self._vim.command('highlight default link deniteInput ModeMsg')
self._vim.command('highlight link deniteMatchedRange ' +
self._context['highlight_matched_range'])
self._vim.command('highlight link deniteMatchedChar ' +
self._context['highlight_matched_char'])
self._vim.command('highlight default link ' +
'deniteStatusLinePath Comment')
self._vim.command('highlight default link ' +
'deniteStatusLineNumber LineNR')
self._vim.command('highlight default link ' +
'deniteSelectedLine Statement')
if self._floating:
self._vim.current.window.options['winhighlight'] = (
'Normal:' + self._context['highlight_window_background']
)
self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
' contains=deniteConcealedMark') % (
self._context['selected_icon']))
self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
' conceal contained') % (
self._context['selected_icon']))
if self._denite:
self._denite.init_syntax(self._context, self._is_multi)
def _update_candidates(self) -> bool:
if not self._denite:
return False
[self._is_async, pattern, statuses, self._entire_len,
self._candidates] = self._denite.filter_candidates(self._context)
prev_displayed_texts = self._displayed_texts
self._update_displayed_texts()
prev_matched_pattern = self._matched_pattern
self._matched_pattern = pattern
prev_statusline_sources = self._statusline_sources
self._statusline_sources = ' '.join(statuses)
if self._is_async:
self._start_timer('update_candidates')
else:
self._stop_timer('update_candidates')
updated = (self._displayed_texts != prev_displayed_texts or
self._matched_pattern != prev_matched_pattern or
self._statusline_sources != prev_statusline_sources)
if updated:
self._updated = True
self._start_timer('update_buffer')
if self._context['search'] and self._context['input']:
self._vim.call('setreg', '/', self._context['input'])
return self._updated
def _update_displayed_texts(self) -> None:
candidates_len = len(self._candidates)
if not self._is_async and self._context['auto_resize']:
winminheight = self._context['winminheight']
max_height = min(self._context['winheight'],
self._get_max_height())
if (winminheight != -1 and candidates_len < winminheight):
self._winheight = winminheight
elif candidates_len > max_height:
self._winheight = max_height
elif candidates_len != self._winheight:
self._winheight = candidates_len
max_source_name_len = 0
if self._candidates:
max_source_name_len = max([
len(self._get_display_source_name(x['source_name']))
for x in self._candidates])
self._context['max_source_name_len'] = max_source_name_len
self._context['max_source_name_format'] = (
'{:<' + str(self._context['max_source_name_len']) + '}')
self._displayed_texts = [
self._get_candidate_display_text(i)
for i in range(0, candidates_len)
]
def _update_buffer(self) -> None:
is_current_buffer = self._bufnr == self._vim.current.buffer.number
self._update_status()
if self._check_matchdelete and self._context['match_highlight']:
matches = [x['id'] for x in
self._vim.call('getmatches', self._winid)]
if self._matched_range_id in matches:
self._vim.call('matchdelete',
self._matched_range_id, self._winid)
self._matched_range_id = -1
if self._matched_char_id in matches:
self._vim.call('matchdelete',
self._matched_char_id, self._winid)
self._matched_char_id = -1
if self._matched_pattern != '':
self._matched_range_id = self._vim.call(
'matchadd', 'deniteMatchedRange',
r'\c' + regex_convert_py_vim(self._matched_pattern),
10, -1, {'window': self._winid})
matched_char_pattern = '[{}]'.format(re.sub(
r'([\[\]\\^-])',
r'\\\1',
self._context['input'].replace(' ', '')
))
self._matched_char_id = self._vim.call(
'matchadd', 'deniteMatchedChar',
matched_char_pattern,
10, -1, {'window': self._winid})
prev_linenr = self._vim.call('line', '.')
prev_candidate = self._get_cursor_candidate()
buffer = self._vim.buffers[self._bufnr]
buffer.options['modifiable'] = True
self._vim.vars['denite#_candidates'] = [
x['word'] for x in self._candidates]
buffer[:] = self._displayed_texts
buffer.options['modifiable'] = False
self._previous_text = self._context['input']
self._resize_buffer(is_current_buffer)
is_changed = (self._context['reversed'] or
(is_current_buffer and
self._previous_text != self._context['input']))
if self._updated and is_changed:
if not is_current_buffer:
save_winid = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
self._init_cursor()
self._move_to_pos(self._cursor)
if not is_current_buffer:
self._vim.call('win_gotoid', save_winid)
elif is_current_buffer:
self._vim.call('cursor', [prev_linenr, 0])
if is_current_buffer:
if (self._context['auto_action'] and
prev_candidate != self._get_cursor_candidate()):
self.do_action(self._context['auto_action'])
self._updated = False
self._stop_timer('update_buffer')
def _update_status(self) -> None:
inpt = ''
if self._context['input']:
inpt = self._context['input'] + ' '
if self._context['error_messages']:
inpt = '[ERROR] ' + inpt
path = '[' + self._context['path'] + ']'
status = {
'input': inpt,
'sources': self._statusline_sources,
'path': path,
# Extra
'buffer_name': self._context['buffer_name'],
'line_total': len(self._candidates),
}
if status == self._prev_status:
return
self._bufvars['denite_statusline'] = status
self._prev_status = status
linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"
if self._context['statusline']:
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = (
"%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} " +
" %{denite#get_status('path')}%*" +
"%{" + linenr + "}%*")
else:
winnr = self._vim.call('win_id2win', self._winid)
self._vim.call('setwinvar', winnr, '&statusline', (
"%#deniteInput#%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} %=" +
"%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
"%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
def _get_candidate_display_text(self, index: int) -> str:
source_names = self._context['source_names']
candidate = self._candidates[index]
terms = []
if self._is_multi and source_names != 'hide':
terms.append(self._context['max_source_name_format'].format(
self._get_display_source_name(candidate['source_name'])))
encoding = self._context['encoding']
abbr = candidate.get('abbr', candidate['word']).encode(
encoding, errors='replace').decode(encoding, errors='replace')
terms.append(abbr[:int(self._context['max_candidate_width'])])
return (str(self._context['selected_icon'])
if index in self._selected_candidates
else ' ') + ' '.join(terms).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
def _resize_buffer(self, is_current_buffer: bool) -> None:
split = self._context['split']
if (split == 'no' or split == 'tab' or
self._vim.call('winnr', '$') == 1):
return
winheight = max(self._winheight, 1)
winwidth = max(self._winwidth, 1)
is_vertical = split == 'vertical'
if not is_current_buffer:
restore = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
if not is_vertical and self._vim.current.window.height != winheight:
if self._floating:
wincol = self._context['winrow']
row = wincol
if split == 'floating':
if self._context['auto_resize'] and row > 1:
row += self._context['winheight']
row -= self._winheight
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'editor',
'row': row,
'col': self._context['wincol'],
'width': winwidth,
'height': winheight,
})
filter_row = 0 if wincol == 1 else row + winheight
filter_col = self._context['wincol']
else:
init_pos = self._vim.call('nvim_win_get_config',
self._winid)
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'win',
'win': init_pos['win'],
'row': init_pos['row'],
'col': init_pos['col'],
'width': winwidth,
'height': winheight,
})
filter_col = init_pos['col']
if init_pos['anchor'] == 'NW':
winpos = self._vim.call('nvim_win_get_position',
self._winid)
filter_row = winpos[0] + winheight
filter_winid = self._vim.vars['denite#_filter_winid']
self._context['filter_winrow'] = row
if self._vim.call('win_id2win', filter_winid) > 0:
self._vim.call('nvim_win_set_config', filter_winid, {
'relative': 'editor',
'row': filter_row,
'col': filter_col,
})
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
if not is_current_buffer:
self._vim.call('win_gotoid', restore)
def _check_do_option(self) -> bool:
if self._context['do'] != '':
self._do_command(self._context['do'])
return True
elif (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self._do_immediately()
return True
return not (self._context['empty'] or
self._is_async or self._candidates)
def _check_move_option(self) -> None:
if self._context['cursor_pos'].isnumeric():
self._cursor = int(self._context['cursor_pos']) + 1
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self._move_to_last_line()
def _do_immediately(self) -> None:
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
# Jump to denite window
self._init_buffer()
self.do_action('default')
candidate = self._get_cursor_candidate()
if not candidate:
return
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor, len(self._candidates),
candidate.get('abbr', candidate['word'])))
if goto:
# Move to the previous window
self._vim.command('wincmd p')
def _do_command(self, command: str) -> None:
self._init_cursor()
cursor = 1
while cursor < len(self._candidates):
self.do_action('default', command)
self._move_to_next_line()
self._quit_buffer()
def _cleanup(self) -> None:
self._stop_timer('update_candidates')
self._stop_timer('update_buffer')
if self._vim.current.buffer.number == self._bufnr:
self._cursor = self._vim.call('line', '.')
# Note: Close filter window before preview window
self._vim.call('denite#filter#_close_filter_window')
if not self._context['has_preview_window']:
self._vim.command('pclose!')
# Clear previewed buffers
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('highlight! link CursorLine CursorLine')
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
def _quit_buffer(self) -> None:
self._cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
# Denite buffer is already closed
return
winids = self._vim.call('win_findbuf',
self._vim.vars['denite#_filter_bufnr'])
if winids:
# Quit filter buffer
self._vim.call('win_gotoid', winids[0])
self._close_current_window()
# Move to denite window
self._vim.call('win_gotoid', self._winid)
# Restore the window
if self._context['split'] == 'no':
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._close_current_window()
self._vim.call('win_gotoid', self._prev_winid)
# Restore the position
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
# Note: execute restcmd twice to restore layout properly
self._vim.command(self._winrestcmd)
self._vim.command(self._winrestcmd)
clearmatch(self._vim)
def _get_cursor_candidate(self) -> Candidate:
return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def _init_denite(self) -> None:
if self._denite:
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = self._context['winheight']
self._winwidth = self._context['winwidth']
def _gather_candidates(self) -> None:
self._selected_candidates = []
if self._denite:
self._denite.gather_candidates(self._context)
def _init_cursor(self) -> None:
if self._context['reversed']:
self._move_to_last_line()
else:
self._move_to_first_line()
def _move_to_pos(self, pos: int) -> None:
self._vim.call('cursor', pos, 0)
self._cursor = pos
if self._context['reversed']:
self._vim.command('normal! zb')
def _move_to_next_line(self) -> None:
if self._cursor < len(self._candidates):
self._cursor += 1
def _move_to_prev_line(self) -> None:
if self._cursor >= 1:
self._cursor -= 1
def _move_to_first_line(self) -> None:
self._cursor = 1
def _move_to_last_line(self) -> None:
self._cursor = len(self._candidates)
def _start_timer(self, key: str) -> None:
if key in self._timers:
return
if key == 'update_candidates':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_candidates_timer', self._bufnr)
elif key == 'update_buffer':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_buffer_timer', self._bufnr)
def _stop_timer(self, key: str) -> None:
if key not in self._timers:
return
self._vim.call('timer_stop', self._timers[key])
# Note: After timer_stop is called, self._timers may be removed
if key in self._timers:
self._timers.pop(key)
def _split_floating(self, split: str) -> None:
# Use floating window
if split == 'floating':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'editor',
'row': self._context['winrow'],
'col': self._context['wincol'],
'width': self._context['winwidth'],
'height': self._context['winheight'],
})
elif split == 'floating_relative_cursor':
opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
self._vim.call('winline') - 1)
if self._context['auto_resize']:
height = max(self._winheight, 1)
width = max(self._winwidth, 1)
else:
width = self._context['winwidth']
height = self._context['winheight']
if opened_pos + height + 3 > self._vim.options['lines']:
anchor = 'SW'
row = 0
self._context['filter_winrow'] = row + opened_pos
else:
anchor = 'NW'
row = 1
self._context['filter_winrow'] = row + height + opened_pos
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'cursor',
'row': row,
'col': 0,
'width': width,
'height': height,
'anchor': anchor,
})
elif split == 'floating_relative_window':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'win',
'row': self._context['winrow'],
'col': self._context['wincol'],
'width': self._context['winwidth'],
'height': self._context['winheight'],
})
avg_line_length: 37.816153 | max_line_length: 79 | alphanum_fraction: 0.54863
qsc_*_quality_signal columns (41 values, in schema order): 3,700 | 35,585 | 4.95 | 0.09973 | 0.058094 | 0.036637 | 0.012995 | 0.344253 | 0.19585 | 0.13448 | 0.096861 | 0.084084 | 0.06967 | 0 | 0.00293 | 0.328565 | 35,585 | 940 | 80 | 37.856383 | 0.763613 | 0.030547 | 0 | 0.25 | 0 | 0 | 0.141075 | 0.02771 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057592 | false | 0 | 0.007853 | 0.005236 | 0.111257 | 0.001309
qsc_* raw columns (41 values, in schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
hexsha: d99f875863138f11af1d76f0c753c198ad6d96bd
size: 1,329
ext: py
lang: Python
max_stars_repo_path: PyDSTool/core/context_managers.py
max_stars_repo_name: yuanz271/PyDSTool
max_stars_repo_head_hexsha: 886c143cdd192aea204285f3a1cb4968c763c646
max_stars_repo_licenses: ["Python-2.0", "OLDAP-2.7"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: PyDSTool/core/context_managers.py
max_issues_repo_name: yuanz271/PyDSTool
max_issues_repo_head_hexsha: 886c143cdd192aea204285f3a1cb4968c763c646
max_issues_repo_licenses: ["Python-2.0", "OLDAP-2.7"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: PyDSTool/core/context_managers.py
max_forks_repo_name: yuanz271/PyDSTool
max_forks_repo_head_hexsha: 886c143cdd192aea204285f3a1cb4968c763c646
max_forks_repo_licenses: ["Python-2.0", "OLDAP-2.7"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
"""Context managers implemented for (mostly) internal use"""
import contextlib
import functools
from io import UnsupportedOperation
import os
import sys
__all__ = ["RedirectStdout", "RedirectStderr"]
@contextlib.contextmanager
def _stdchannel_redirected(stdchannel, dest_filename, mode="w"):
"""
A context manager to temporarily redirect stdout or stderr
Originally by Marc Abramowitz, 2013
(http://marc-abramowitz.com/archives/2013/07/19/python-context-manager-for-redirected-stdout-and-stderr/)
"""
oldstdchannel = None
dest_file = None
try:
if stdchannel is None:
yield iter([None])
else:
oldstdchannel = os.dup(stdchannel.fileno())
dest_file = open(dest_filename, mode)
os.dup2(dest_file.fileno(), stdchannel.fileno())
yield
except (UnsupportedOperation, AttributeError):
yield iter([None])
finally:
if oldstdchannel is not None:
os.dup2(oldstdchannel, stdchannel.fileno())
if dest_file is not None:
dest_file.close()
RedirectStdout = functools.partial(_stdchannel_redirected, sys.stdout)
RedirectStderr = functools.partial(_stdchannel_redirected, sys.stderr)
RedirectNoOp = functools.partial(_stdchannel_redirected, None, "")
avg_line_length: 28.891304 | max_line_length: 109 | alphanum_fraction: 0.68924
qsc_*_quality_signal columns (41 values, in schema order): 144 | 1,329 | 6.229167 | 0.479167 | 0.044593 | 0.086957 | 0.120401 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014299 | 0.210685 | 1,329 | 45 | 110 | 29.533333 | 0.840801 | 0.209932 | 0 | 0.071429 | 0 | 0 | 0.028404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.178571 | 0 | 0.214286 | 0
qsc_* raw columns (41 values, in schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
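The module above exposes `RedirectStdout` and `RedirectStderr` as partials of `_stdchannel_redirected`, so each takes only a destination filename (plus an optional mode). A small usage sketch; the log file name is illustrative:

# Everything printed inside the block is written to the file instead of the terminal.
from PyDSTool.core.context_managers import RedirectStdout

with RedirectStdout("solver_output.log"):
    print("this line goes to solver_output.log")
print("back on the normal stdout")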
hexsha: d99ff34b5f61cee604590c456f40398d7da18182
size: 3,215
ext: py
lang: Python
max_stars_repo_path: pos_kiosk/hooks.py
max_stars_repo_name: Muzzy73/pos_kiosk
max_stars_repo_head_hexsha: 1ed42cfaeb15f009293b76d05dd85bd322b42f03
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2022-03-05T11:42:36.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-05T11:42:36.000Z
max_issues_repo_path: pos_kiosk/hooks.py
max_issues_repo_name: Muzzy73/pos_kiosk
max_issues_repo_head_hexsha: 1ed42cfaeb15f009293b76d05dd85bd322b42f03
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pos_kiosk/hooks.py
max_forks_repo_name: Muzzy73/pos_kiosk
max_forks_repo_head_hexsha: 1ed42cfaeb15f009293b76d05dd85bd322b42f03
max_forks_repo_licenses: ["MIT"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-03-05T11:42:37.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-05T11:42:37.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "pos_kiosk"
app_title = "Pos Kiosk"
app_publisher = "9t9it"
app_description = "Kiosk App"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "[email protected]"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/pos_kiosk/css/pos_kiosk.css"
# app_include_js = "/assets/pos_kiosk/js/pos_kiosk.js"
# include js, css files in header of web template
# web_include_css = "/assets/pos_kiosk/css/pos_kiosk.css"
# web_include_js = "/assets/pos_kiosk/js/pos_kiosk.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# page_js = {
# "kiosk": ["public/js/pos_page_js.js", "public/js/includes/number_to_words.js"]
# }
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
fixtures = [
{
"doctype": "Custom Field",
"filters": [
[
"name",
"in",
[
"Sales Invoice Item-pos_kiosk",
"Mode of Payment-logo"
]
]
]
}
]
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "pos_kiosk.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "pos_kiosk.install.before_install"
# after_install = "pos_kiosk.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "pos_kiosk.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "pos_kiosk.tasks.all"
# ],
# "daily": [
# "pos_kiosk.tasks.daily"
# ],
# "hourly": [
# "pos_kiosk.tasks.hourly"
# ],
# "weekly": [
# "pos_kiosk.tasks.weekly"
# ]
# "monthly": [
# "pos_kiosk.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "pos_kiosk.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "pos_bahrain.api.get_item_details.get_item_details": "pos_kiosk.api.item.get_item_details" # noqa
# }
avg_line_length: 22.964286 | max_line_length: 101 | alphanum_fraction: 0.631415
qsc_*_quality_signal columns (41 values, in schema order): 384 | 3,215 | 5.010417 | 0.338542 | 0.091476 | 0.033784 | 0.035343 | 0.235967 | 0.141892 | 0.108628 | 0.085239 | 0.085239 | 0.045738 | 0 | 0.001911 | 0.186003 | 3,215 | 139 | 102 | 23.129496 | 0.733282 | 0.7521 | 0 | 0 | 0 | 0 | 0.231206 | 0.031206 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0
qsc_* raw columns (41 values, in schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
hexsha: d9a0c8935f1da040f76922b94d20a857d8b8cd7d
size: 3,338
ext: py
lang: Python
max_stars_repo_path: easyai/model/backbone/cls/pnasnet.py
max_stars_repo_name: lpj0822/image_point_cloud_det
max_stars_repo_head_hexsha: 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-09-05T09:18:56.000Z
max_stars_repo_stars_event_max_datetime: 2020-09-05T09:18:56.000Z
max_issues_repo_path: easyai/model/backbone/cls/pnasnet.py
max_issues_repo_name: lpj0822/image_point_cloud_det
max_issues_repo_head_hexsha: 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
max_issues_repo_licenses: ["MIT"]
max_issues_count: 8
max_issues_repo_issues_event_min_datetime: 2020-04-20T02:18:55.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-12T00:24:50.000Z
max_forks_repo_path: easyai/model/backbone/cls/pnasnet.py
max_forks_repo_name: lpj0822/image_point_cloud_det
max_forks_repo_head_hexsha: 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
''' PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
from easyai.base_name.block_name import NormalizationType, ActivationType
from easyai.base_name.backbone_name import BackboneName
from easyai.model.backbone.utility.base_backbone import *
from easyai.model.base_block.utility.utility_block import ConvBNActivationBlock
from easyai.model.base_block.cls.pnasnet_block import CellA, CellB
__all__ = ['pnasnet_A', 'pnasnet_B']
class PNASNet(BaseBackbone):
def __init__(self, data_channel=3, num_cells=6,
num_planes=44, block=CellA,
bnName=NormalizationType.BatchNormalize2d,
activationName=ActivationType.ReLU):
super().__init__()
self.set_name(BackboneName.PNASNetA)
self.data_channel = data_channel
self.num_cells = num_cells
self.block = block
self.activation_name = activationName
self.bn_name = bnName
self.first_output = num_planes
self.in_planes = self.first_output
self.create_block_list()
def create_block_list(self):
self.block_out_channels = []
self.index = 0
layer1 = ConvBNActivationBlock(in_channels=self.data_channel,
out_channels=self.first_output,
kernel_size=3,
stride=1,
padding=1,
bias=False,
bnName=self.bn_name,
activationName=self.activation_name)
self.add_block_list(layer1.get_name(), layer1, self.first_output)
self.make_layer(self.first_output, self.num_cells)
self.downsample(self.first_output * 2)
self.make_layer(self.first_output * 2, self.num_cells)
self.downsample(self.first_output * 4)
self.make_layer(self.first_output * 4, self.num_cells)
def make_layer(self, planes, num_cells):
for _ in range(num_cells):
temp_block = self.block(self.in_planes, planes, stride=1,
bn_name=self.bn_name, activation_name=self.activation_name)
self.add_block_list(temp_block.get_name(), temp_block, planes)
self.in_planes = planes
def downsample(self, planes):
down_block = self.block(self.in_planes, planes, stride=2,
bn_name=self.bn_name, activation_name=self.activation_name)
self.add_block_list(down_block.get_name(), down_block, planes)
self.in_planes = planes
def forward(self, x):
output_list = []
for block in self._modules.values():
x = block(x)
output_list.append(x)
return output_list
def pnasnet_A(data_channel):
model = PNASNet(data_channel=data_channel,
num_cells=6,
num_planes=44,
block=CellA)
model.set_name(BackboneName.PNASNetA)
return model
def pnasnet_B(data_channel):
model = PNASNet(data_channel=data_channel,
num_cells=6, num_planes=32,
block=CellB)
model.set_name(BackboneName.PNASNetB)
return model
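A minimal usage sketch, not part of the original file; it assumes PyTorch is installed, that the easyai base classes behave like standard nn.Module, and that the input size below is just a placeholder:
import torch
backbone = pnasnet_A(data_channel=3)               # PNASNet-A for 3-channel images
dummy = torch.randn(1, 3, 32, 32)                  # one 32x32 RGB image (assumed size)
features = backbone(dummy)                         # forward() collects every block's output
print([f.shape for f in features])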
| 35.892473
| 95
| 0.612942
| 386
| 3,338
| 5.012953
| 0.238342
| 0.056848
| 0.069767
| 0.037209
| 0.357623
| 0.328682
| 0.285271
| 0.234625
| 0.131266
| 0.131266
| 0
| 0.010748
| 0.303176
| 3,338
| 92
| 96
| 36.282609
| 0.821152
| 0.034452
| 0
| 0.117647
| 0
| 0
| 0.005602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102941
| false
| 0
| 0.073529
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9a268f19adc7700cf1335eb9dfc2c8d74c5a4dc
| 2,208
|
py
|
Python
|
tools/utils.py
|
vahini01/electoral_rolls
|
82e42a6ee68844b1c8ac7899e8e7bf7a24e48d44
|
[
"MIT"
] | 16
|
2018-01-22T02:03:09.000Z
|
2022-02-24T07:16:47.000Z
|
tools/utils.py
|
vahini01/electoral_rolls
|
82e42a6ee68844b1c8ac7899e8e7bf7a24e48d44
|
[
"MIT"
] | 2
|
2019-02-01T02:48:17.000Z
|
2020-09-06T06:09:35.000Z
|
tools/utils.py
|
vahini01/electoral_rolls
|
82e42a6ee68844b1c8ac7899e8e7bf7a24e48d44
|
[
"MIT"
] | 8
|
2018-01-22T06:48:07.000Z
|
2021-08-08T16:26:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 10 23:28:58 2017
@author: dhingratul
"""
import urllib.request
import os
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from bs4 import BeautifulSoup
import ssl
import requests
import wget
from PyPDF2 import PdfFileReader
def download_file(pdf_url, mdir, filename, flag=False):
if flag is True:
context = ssl._create_unverified_context()
response = urllib.request.urlopen(pdf_url, context=context)
else:
response = urllib.request.urlopen(pdf_url)
filename = mdir + filename
file = open(filename, 'wb')
file.write(response.read())
if os.stat(filename).st_size == 0:
flag = 0
else:
flag = 1
file.close()
return flag
def download_file_R(pdf_url, mdir, filename, file_out):
requests.packages.urllib3.disable_warnings()
while True: # Keep trying until the webpage successfully downloads
try:
r = requests.get(pdf_url, verify=False, timeout=10)
break # If it downloads, get out and get on with life
# If it doesn't download after the timeout period, an exception is thrown, and we try again
except requests.exceptions.RequestException as e:
with open(file_out, "a") as myfile:
myfile.write(pdf_url + '\n')
filename = mdir + filename
with open(filename, 'wb') as f:
f.write(r.content)
if os.stat(filename).st_size == 0:
flag = 0
else:
flag = 1
return flag
def download_file_W(pdf_url, mdir, filename, flag=False):
filename = mdir + filename
ssl._create_default_https_context = ssl._create_unverified_context
wget.download(pdf_url, filename)
if os.stat(filename).st_size == 0:
flag = 0
else:
flag = 1
return flag
def getDriver(url):
driver = webdriver.Chrome()
driver.get(url)
return driver
def is_valid_pdf(fn):
"""Check is the PDF valid """
try:
with open(fn, 'rb') as f:
pdf = PdfFileReader(f)
numpages = pdf.numPages
return (numpages > 0)
except Exception as e:
return False
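A short usage sketch, not part of the original file; the URL and paths below are placeholders:
# fetch a PDF with the requests-based helper, logging failed URLs, then sanity-check it
ok = download_file_R("https://example.com/sample.pdf", "/tmp/", "sample.pdf", "/tmp/failed_urls.txt")
print("downloaded and parseable" if ok and is_valid_pdf("/tmp/sample.pdf") else "download failed")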
| 25.976471
| 100
| 0.646286
| 299
| 2,208
| 4.668896
| 0.391304
| 0.034384
| 0.032235
| 0.038682
| 0.259312
| 0.18553
| 0.098138
| 0.098138
| 0.098138
| 0.098138
| 0
| 0.017748
| 0.259964
| 2,208
| 84
| 101
| 26.285714
| 0.836597
| 0.14221
| 0
| 0.33871
| 0
| 0
| 0.004795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.145161
| 0
| 0.322581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9a3883f0ea5d080d5d4d2e05df6fadcaeb5c36e
| 1,956
|
py
|
Python
|
exp/viz_raw_manhattan.py
|
ellencwade/coronavirus-2020
|
b71e018deb8df8450b4d88ddbcd6ded6497aa8f9
|
[
"MIT"
] | null | null | null |
exp/viz_raw_manhattan.py
|
ellencwade/coronavirus-2020
|
b71e018deb8df8450b4d88ddbcd6ded6497aa8f9
|
[
"MIT"
] | null | null | null |
exp/viz_raw_manhattan.py
|
ellencwade/coronavirus-2020
|
b71e018deb8df8450b4d88ddbcd6ded6497aa8f9
|
[
"MIT"
] | null | null | null |
"""
Experiment summary
------------------
Treat each country's case counts over time (summed over its
provinces/states) as a vector and run a simple K-Nearest
Neighbor search between countries. Which country has the most
similar trajectory to a given country?
Plots the similar countries.
"""
import sys
sys.path.insert(0, '..')
from utils import data
import os
import sklearn
import numpy as np
import json
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# ------------ HYPERPARAMETERS -------------
BASE_PATH = '../COVID-19/csse_covid_19_data/'
# ------------------------------------------
confirmed = os.path.join(
BASE_PATH,
'csse_covid_19_time_series',
'time_series_covid19_confirmed_global.csv')
confirmed = data.load_csv_data(confirmed)
features = []
targets = []
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(111)
cm = plt.get_cmap('jet')
NUM_COLORS = 0
LINE_STYLES = ['solid', 'dashed', 'dotted']
NUM_STYLES = len(LINE_STYLES)
dist_diff = os.path.join('../exp/results/', 'knn_raw.json')
f = open(dist_diff)
dist_diff = json.load(f)
for region, dist in dist_diff.items():
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(111)
cm = plt.get_cmap('jet')
other_region = dist['manhattan'][0]
regions = [region, other_region]
for val in regions:
df = data.filter_by_attribute(
confirmed, "Country/Region", val)
cases, labels = data.get_cases_chronologically(df)
cases = cases.sum(axis=0)
lines = ax.plot(cases, label=val)
ax.set_ylabel('# of confirmed cases')
ax.set_xlabel("Time (days since Jan 22, 2020)")
ax.set_yscale('log')
ax.legend()
plt.tight_layout()
region = region.replace('*', '')
other_region = other_region.replace('*', '')
plt.title(f'Comparing confirmed cases in {region} and {other_region}')
plt.savefig(f'results/raw_manhattan/{region}.png')
plt.close()
print(region)
| 26.432432
| 74
| 0.658487
| 270
| 1,956
| 4.614815
| 0.485185
| 0.044141
| 0.017657
| 0.041734
| 0.089888
| 0.089888
| 0.089888
| 0.089888
| 0.089888
| 0.089888
| 0
| 0.019692
| 0.169223
| 1,956
| 74
| 75
| 26.432432
| 0.747077
| 0.169734
| 0
| 0.16
| 0
| 0
| 0.214109
| 0.080446
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.14
| 0
| 0.14
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9a428c026d2352f281b2b7ddd8ec8a286d37297
| 5,290
|
py
|
Python
|
rational/mxnet/rationals.py
|
steven-lang/rational_activations
|
234623dbb9360c215c430185b09e2237d5186b54
|
[
"MIT"
] | null | null | null |
rational/mxnet/rationals.py
|
steven-lang/rational_activations
|
234623dbb9360c215c430185b09e2237d5186b54
|
[
"MIT"
] | null | null | null |
rational/mxnet/rationals.py
|
steven-lang/rational_activations
|
234623dbb9360c215c430185b09e2237d5186b54
|
[
"MIT"
] | null | null | null |
"""
Rational Activation Functions for MXNET
=======================================
This module allows you to create Rational Neural Networks using Learnable
Rational activation functions with MXNET networks.
"""
import mxnet as mx
from mxnet import initializer
from mxnet.gluon import HybridBlock
from rational.utils.get_weights import get_parameters
from rational.mxnet.versions import _version_a, _version_b, _version_c, _version_d
from rational._base.rational_base import Rational_base
class Rational(Rational_base, HybridBlock):
"""
Rational Activation Function, inheriting from ``mxnet.gluon.HybridBlock``.
Arguments:
approx_func (str):
The name of the approximated function for initialisation.
The different functions are available in `rational.rationals_config.json`.
Default: ``leaky_relu``
degrees (tuple of int):
The degrees of the numerator (P) and denominator (Q).
Default ``(5, 4)``
cuda (bool):
whether to execute on cuda device.
NOTE: THIS PARAMETER IS CURRENTLY NOT CONSIDERED.
CUDA GPUS ARE USED WHEN IT IS POSSIBLE
version (str):
Version of Rational to use. Rational(x) = P(x)/Q(x),
where
P(x) = (a_0 + a_1 * x + a_2 * x^2 + ... + a_n * x^n) and
`A`: Q(x) = (1 + |b_0 * x| + | b_1 * x^2| + ... + | b_m * x^{m+1}|)
`B`: Q(x) = (1 + |b_0 * x + b_1 * x^2 + ... + b_m * x^{m + 1}|)
`C`: Q(x) = (0.1 + |b_0 + b_1 * x + b_2 * x^2 + ... + b_m * x^m|)
`D`: like `B` with noised coefficients b_i
Default ``A``
trainable (bool):
Whether the weights are trainable, i.e, if they are updated during
backward pass.
Default ``True``
Returns:
HybridBlock:
Rational hybrid block
"""
def __init__(self, approx_func='leaky_relu', degrees=(5, 4), cuda=False,
version='A', trainable=True, **kwargs):
super(Rational, self).__init__(**kwargs)
# read initial parameter configuration from external files
w_numerator, w_denominator = get_parameters(
version, degrees, approx_func)
# convert w_numerator and w_denominator to mxnet arrays
w_numerator = mx.nd.array(w_numerator)
w_denominator = mx.nd.array(w_denominator)
# register the amount of weights in numerator and denominator, since we need them during
# symbolic execution, but are unable to retrieve them at later stages
self.numerator_length = len(w_numerator)
self.denominator_length = len(w_denominator)
self.training = trainable
self.degrees = degrees
self.version = version
self.init_approximation = approx_func
# set specified context (currently not happening, since unclear, how and why helpful)
# self.device = gpu() if cuda else cpu()
# register and configure weights (numerator and denominator coefficients)
with self.name_scope():
self.numerator = self.params.get(name='w_numerator', shape=(len(w_numerator),),
init=initializer.Constant(
w_numerator),
grad_req='write' if trainable
else 'null',
differentiable=trainable)
self.denominator = self.params.get(name='w_denominator', shape=(len(w_denominator),),
init=initializer.Constant(
w_denominator),
grad_req='write' if trainable
else 'null',
differentiable=trainable)
# register whether function is trainable, since this information needs to be passed to
# version D
self.training = trainable
self.init_approximation = approx_func
# set rational activation function version
self.rational_func = {'A': _version_a, 'B': _version_b, 'C': _version_c, 'D': _version_d} \
.get(version)
if self.rational_func is None:
raise ValueError(
"rational activation function version %s not implemented" % version)
def hybrid_forward(self, F, x, numerator, denominator):
return self.rational_func(F, x, numerator, denominator, self.training,
self.numerator_length, self.denominator_length)
def numpy(self):
"""
Returns a numpy version of this activation function.
"""
from rational.numpy import Rational as Rational_numpy
rational_n = Rational_numpy(self.init_approximation, self.degrees,
self.version)
rational_n.numerator = self.numerator.data().asnumpy().tolist()
rational_n.denominator = self.denominator.data().asnumpy().tolist()
return rational_n
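A usage sketch, not part of the original file, showing how the block might be dropped into a small Gluon network; it assumes mxnet and this package are importable:
from mxnet.gluon import nn
net = nn.HybridSequential()
net.add(nn.Dense(64))
net.add(Rational(approx_func='leaky_relu', version='A'))   # learnable rational activation
net.add(nn.Dense(10))
net.initialize()
out = net(mx.nd.random.uniform(shape=(8, 32)))             # batch of 8 feature vectors of length 32
print(out.shape)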
| 42.66129
| 99
| 0.56276
| 584
| 5,290
| 4.943493
| 0.304795
| 0.02771
| 0.027018
| 0.004157
| 0.085902
| 0.073433
| 0.0478
| 0.0478
| 0.0478
| 0.010391
| 0
| 0.006928
| 0.34518
| 5,290
| 123
| 100
| 43.00813
| 0.826501
| 0.413422
| 0
| 0.24
| 0
| 0
| 0.038147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.14
| 0.02
| 0.26
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9a6621d903359b14c87695eb4a1ac8dcea18138
| 844
|
py
|
Python
|
torchflare/criterion/utils.py
|
Neklaustares-tPtwP/torchflare
|
7af6b01ef7c26f0277a041619081f6df4eb1e42c
|
[
"Apache-2.0"
] | 1
|
2021-09-14T08:38:05.000Z
|
2021-09-14T08:38:05.000Z
|
torchflare/criterion/utils.py
|
weidao-Shi/torchflare
|
3c55b5a0761f2e85dd6da95767c6ec03f0f5baad
|
[
"Apache-2.0"
] | null | null | null |
torchflare/criterion/utils.py
|
weidao-Shi/torchflare
|
3c55b5a0761f2e85dd6da95767c6ec03f0f5baad
|
[
"Apache-2.0"
] | 1
|
2021-08-06T19:24:43.000Z
|
2021-08-06T19:24:43.000Z
|
"""Utils for criterion."""
import torch
import torch.nn.functional as F
def normalize(x, axis=-1):
"""Performs L2-Norm."""
num = x
denom = torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12
return num / denom
# Source : https://github.com/earhian/Humpback-Whale-Identification-1st-/blob/master/models/triplet_loss.py
def euclidean_dist(x, y):
"""Computes Euclidean distance."""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy - 2 * torch.matmul(x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
def cosine_dist(x, y):
"""Computes Cosine Distance."""
x = F.normalize(x, dim=1)
y = F.normalize(y, dim=1)
dist = 2 - 2 * torch.mm(x, y.t())
return dist
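A small sanity check, not part of the original file, comparing the helpers above against torch.cdist on random embeddings:
x = torch.randn(4, 16)
y = torch.randn(5, 16)
print(torch.allclose(euclidean_dist(x, y), torch.cdist(x, y), atol=1e-4))   # should print True
print(cosine_dist(x, y).shape)                                              # torch.Size([4, 5])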
| 26.375
| 107
| 0.613744
| 141
| 844
| 3.64539
| 0.439716
| 0.015564
| 0.099222
| 0.054475
| 0.124514
| 0.124514
| 0.124514
| 0.124514
| 0.124514
| 0.124514
| 0
| 0.031019
| 0.197867
| 844
| 31
| 108
| 27.225806
| 0.728213
| 0.236967
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.111111
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9a90a5af3f207f1020cbf41f94830b75e23fbc9
| 4,411
|
py
|
Python
|
readthedocs/donate/forms.py
|
gamearming/readthedocs
|
53d0094f657f549326a86b8bd0ccf924c2126941
|
[
"MIT"
] | null | null | null |
readthedocs/donate/forms.py
|
gamearming/readthedocs
|
53d0094f657f549326a86b8bd0ccf924c2126941
|
[
"MIT"
] | null | null | null |
readthedocs/donate/forms.py
|
gamearming/readthedocs
|
53d0094f657f549326a86b8bd0ccf924c2126941
|
[
"MIT"
] | null | null | null |
"""Forms for RTD donations"""
import logging
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin
from readthedocs.payments.utils import stripe
from .models import Supporter
log = logging.getLogger(__name__)
class SupporterForm(StripeResourceMixin, StripeModelForm):
"""Donation support sign up form
This extends the basic payment form, giving fields for credit card number,
expiry, and CVV. The proper Knockout data bindings are established on
:py:class:`StripeModelForm`
"""
class Meta:
model = Supporter
fields = (
'last_4_digits',
'name',
'email',
'dollars',
'logo_url',
'site_url',
'public',
)
labels = {
'public': _('Make this donation public'),
}
help_texts = {
'public': _('Your name and image will be displayed on the donation page'),
'email': _('Your email is used for Gravatar and so we can send you a receipt'),
'logo_url': _("URL of your company's logo, images should be 300x300 pixels or less"),
'dollars': _('Companies donating over $400 can specify a logo URL and site link'),
}
widgets = {
'dollars': forms.HiddenInput(attrs={
'data-bind': 'value: dollars'
}),
'logo_url': forms.TextInput(attrs={
'data-bind': 'value: logo_url, enable: urls_enabled'
}),
'site_url': forms.TextInput(attrs={
'data-bind': 'value: site_url, enable: urls_enabled'
}),
'last_4_digits': forms.TextInput(attrs={
'data-bind': 'valueInit: card_digits, value: card_digits'
}),
}
last_4_digits = forms.CharField(widget=forms.HiddenInput(), required=True)
name = forms.CharField(required=True)
email = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(SupporterForm, self).__init__(*args, **kwargs)
def validate_stripe(self):
"""Call stripe for payment (not ideal here) and clean up logo < $200"""
dollars = self.cleaned_data['dollars']
if dollars < 200:
self.cleaned_data['logo_url'] = None
self.cleaned_data['site_url'] = None
stripe.Charge.create(
amount=int(self.cleaned_data['dollars']) * 100,
currency='usd',
source=self.cleaned_data['stripe_token'],
description='Read the Docs Sustained Engineering',
receipt_email=self.cleaned_data['email']
)
def save(self, commit=True):
supporter = super(SupporterForm, self).save(commit)
if commit and self.user is not None and self.user.is_authenticated():
supporter.user = self.user
supporter.save()
return supporter
class EthicalAdForm(StripeResourceMixin, StripeModelForm):
"""Payment form for ethical ads
This extends the basic payment form, giving fields for credit card number,
expiry, and CVV. The proper Knockout data bindings are established on
:py:class:`StripeModelForm`
"""
class Meta:
model = Supporter
fields = (
'last_4_digits',
'name',
'email',
'dollars',
)
help_texts = {
'email': _('Your email is used so we can send you a receipt'),
}
widgets = {
'dollars': forms.HiddenInput(attrs={
'data-bind': 'value: dollars'
}),
'last_4_digits': forms.TextInput(attrs={
'data-bind': 'valueInit: card_digits, value: card_digits'
}),
}
last_4_digits = forms.CharField(widget=forms.HiddenInput(), required=True)
name = forms.CharField(required=True)
email = forms.CharField(required=True)
def validate_stripe(self):
stripe.Charge.create(
amount=int(self.cleaned_data['dollars']) * 100,
currency='usd',
source=self.cleaned_data['stripe_token'],
description='Read the Docs Sponsorship Payment',
receipt_email=self.cleaned_data['email']
)
| 33.416667
| 97
| 0.594423
| 478
| 4,411
| 5.349372
| 0.320084
| 0.038717
| 0.052796
| 0.028158
| 0.525616
| 0.509973
| 0.484943
| 0.44036
| 0.44036
| 0.397341
| 0
| 0.008729
| 0.298798
| 4,411
| 131
| 98
| 33.671756
| 0.817976
| 0.112446
| 0
| 0.510204
| 0
| 0
| 0.230052
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.071429
| 0
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ad95f0461bd02e44c310b1381567e8524c288c
| 6,258
|
py
|
Python
|
pandas_datareaders_unofficial/datareaders/google_finance_options.py
|
movermeyer/pandas_datareaders_unofficial
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 18
|
2015-02-05T01:42:51.000Z
|
2020-12-27T19:24:25.000Z
|
pandas_datareaders_unofficial/datareaders/google_finance_options.py
|
movermeyer/pandas_datareaders_unofficial
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 1
|
2015-01-12T11:08:02.000Z
|
2015-01-13T09:14:47.000Z
|
pandas_datareaders_unofficial/datareaders/google_finance_options.py
|
femtotrader/pandas_datareaders
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 13
|
2015-09-10T19:39:51.000Z
|
2022-01-06T17:08:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import DataReaderBase
from ..tools import COL, _get_dates, to_float, to_int
import pandas as pd
#from pandas.tseries.frequencies import to_offset
from six.moves import cStringIO as StringIO
import logging
import traceback
import datetime
import json
import token, tokenize
def ymd_to_date(y, m, d):
"""
Returns date
>>> expiration = {u'd': 1, u'm': 12, u'y': 2014}
>>> ymd_to_date(**expiration)
datetime.date(2014, 12, 1)
>>> ymd_to_date(2014, 3, 1)
datetime.date(2014, 3, 1)
"""
return(datetime.date(year=y, month=m, day=d))
def date_to_ymd(date):
"""
Returns dict like {'y': ..., 'm': ..., 'd': ...}
>>> date_to_ymd(datetime.date(year=2010, month=1, day=3))
{'y': 2010, 'm': 1, 'd': 3}
"""
d = {
'y': date.year,
'm': date.month,
'd': date.day
}
return(d)
def fix_lazy_json(in_text):
"""
Handle lazy JSON - to fix expecting property name
this function fixes the json output from google
http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name
"""
tokengen = tokenize.generate_tokens(StringIO(in_text).readline)
result = []
for tokid, tokval, _, _, _ in tokengen:
# fix unquoted strings
if (tokid == token.NAME):
if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
tokid = token.STRING
tokval = u'"%s"' % tokval
# fix single-quoted strings
elif (tokid == token.STRING):
if tokval.startswith ("'"):
tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')
# remove invalid commas
elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
if (len(result) > 0) and (result[-1][1] == ','):
result.pop()
result.append((tokid, tokval))
return tokenize.untokenize(result)
def json_decode(json_string):
try:
ret = json.loads(json_string)
except:
json_string = fix_lazy_json(json_string)
ret = json.loads(json_string)
return ret
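# Editor's illustration, not part of the original file: fix_lazy_json repairs the "lazy"
# JSON returned by the Google Finance endpoint -- unquoted keys and single-quoted
# strings -- so that json.loads can parse it, roughly:
#   json_decode("{expiry: {d: 1, m: 12, y: 2014}, 'type': 'call'}")
#   -> {'expiry': {'d': 1, 'm': 12, 'y': 2014}, 'type': 'call'}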
class DataReaderGoogleFinanceOptions(DataReaderBase):
"""
DataReader to fetch data from Google Finance Options
see https://www.google.com/finance/option_chain
https://github.com/makmac213/python-google-option-chain
http://www.drtomstarke.com/index.php/option-chains-from-google-finance-api
"""
def init(self, *args, **kwargs):
self._get_multi = self._get_multi_todict
def _get_one(self, name, *args, **kwargs):
return(self._get_one_raw(name, 'All', 'json'))
def _get_one_raw(self, symbol, typ='All', output='json', y='2014', m='12', d='1'):
url = "https://www.google.com/finance/option_chain"
params = {
'q': symbol,
'type': typ,
'output': output,
}
data = self._get_content(url, params)
d = {}
lst = []
for typ in [u'puts', u'calls']:
df_typ = pd.DataFrame(data[typ])
df_typ['Type'] = typ
lst.append(df_typ)
del data[typ]
for i, expiration in enumerate(data['expirations']):
params = {
'q': symbol,
'output': output,
'expy': expiration['y'],
'expm': expiration['m'],
'expd': expiration['d'],
}
data = self._get_content(url, params)
for typ in [u'puts', u'calls']:
df_typ = pd.DataFrame(data[typ])
df_typ['Type'] = typ
lst.append(df_typ)
del data[typ]
df = pd.concat(lst, axis=0, ignore_index=True)
d_cols = {
"a": "Ask",
"b": "Bid",
"p": "Last",
"strike": "Strike",
"expiry": "Expiry",
"vol": "Volume",
"name": "Name"
}
df = df.rename(columns=d_cols)
"""
d_cols = {
"a": "ask",
"b": "bid",
"c": "change",
"cid": "identity code",
"cp": "cp"
"cs": change direction. "chg" = up, "chr" = down, "chg"?
"e": # I think this tells us something about what country where the stock is traded. "OPRA" means USA.
"expiry": expiration date for this option
"name": I don't know. I have never seen a value for this
"oi": open interest. How many of these are currently being held by others.
See, http://www.investopedia.com/terms/o/openinterest.asp
"p": price, last
"s": option code.
Basically, Stock Symbol + 7 if mini option + date + "C" or "P" + price
"strike": "strike price for this option"
"vol": "the volume of options traded."
}
"""
for col in ['Ask', 'Bid', 'c', 'cp', 'Last', 'Strike']:
df[col] = df[col].map(to_float)
for col in ['Volume', 'oi', 'cid']:
df[col] = df[col].map(to_int)
df['Expiry'] = pd.to_datetime(df['Expiry'])
data['options'] = df
data['underlying_id'] = int(data['underlying_id'])
data['expiry'] = ymd_to_date(**data['expiry'])
for i, expiration in enumerate(data['expirations']):
data['expirations'][i] = ymd_to_date(**expiration)
#for col in ['Volume']:
# df[col] = df[col].fillna(0)
#d = {}
#d["options"] = df
#return(d)
return(data)
def _get_content(self, url, params):
#response = requests.get(url, params=params)
response = self.session.get(url, params=params)
if response.status_code == 200:
content_json = response.text
data = json_decode(content_json)
return(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29.942584
| 115
| 0.527964
| 754
| 6,258
| 4.270557
| 0.324934
| 0.01087
| 0.013975
| 0.013043
| 0.198758
| 0.180745
| 0.146584
| 0.1
| 0.1
| 0.1
| 0
| 0.016197
| 0.319271
| 6,258
| 208
| 116
| 30.086538
| 0.739671
| 0.167945
| 0
| 0.281818
| 0
| 0
| 0.087476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0
| 0.090909
| 0.009091
| 0.190909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9adb9ef68a4c2ce5de1ed13aea3230964400996
| 5,039
|
py
|
Python
|
keras_textclassification/data_preprocess/generator_preprocess.py
|
Vail-qin/Keras-TextClassification
|
8acda5ae37db2647c8ecaa70027ffc6003d2abca
|
[
"MIT"
] | 1
|
2019-12-27T16:59:16.000Z
|
2019-12-27T16:59:16.000Z
|
keras_textclassification/data_preprocess/generator_preprocess.py
|
Yolo-Cultivate/Keras-TextClassification
|
183cf7b3483588bfe10d19b65124e52df5b338f8
|
[
"MIT"
] | null | null | null |
keras_textclassification/data_preprocess/generator_preprocess.py
|
Yolo-Cultivate/Keras-TextClassification
|
183cf7b3483588bfe10d19b65124e52df5b338f8
|
[
"MIT"
] | 1
|
2022-01-11T06:37:54.000Z
|
2022-01-11T06:37:54.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2019/11/2 21:08
# @author : Mo
# @function:
from keras_textclassification.data_preprocess.text_preprocess import load_json, save_json
from keras_textclassification.conf.path_config import path_model_dir
path_fast_text_model_vocab2index = path_model_dir + 'vocab2index.json'
path_fast_text_model_l2i_i2l = path_model_dir + 'l2i_i2l.json'
import numpy as np
import os
class PreprocessGenerator:
"""
Data preprocessing; the input is a csv file in [label,ques] format
"""
def __init__(self):
self.l2i_i2l = None
if os.path.exists(path_fast_text_model_l2i_i2l):
self.l2i_i2l = load_json(path_fast_text_model_l2i_i2l)
def prereocess_idx(self, pred):
if os.path.exists(path_fast_text_model_l2i_i2l):
pred_i2l = {}
i2l = self.l2i_i2l['i2l']
for i in range(len(pred)):
pred_i2l[i2l[str(i)]] = pred[i]
pred_i2l_rank = [sorted(pred_i2l.items(), key=lambda k: k[1], reverse=True)]
return pred_i2l_rank
else:
raise RuntimeError("path_fast_text_model_label2index is None")
def prereocess_pred_xid(self, pred):
if os.path.exists(path_fast_text_model_l2i_i2l):
pred_l2i = {}
l2i = self.l2i_i2l['l2i']
for i in range(len(pred)):
pred_l2i[pred[i]] = l2i[pred[i]]
pred_l2i_rank = [sorted(pred_l2i.items(), key=lambda k: k[1], reverse=True)]
return pred_l2i_rank
else:
raise RuntimeError("path_fast_text_model_label2index is None")
def preprocess_get_label_set(self, path):
# First collect the label set, i.e. the concrete classes that occur in the data
label_set = set()
len_all = 0
file_csv = open(path, "r", encoding="utf-8")
for line in file_csv:
len_all += 1
if len_all > 1:  # the first line is the 'label,ques' header, so skip it
line_sp = line.split(",")
label_org = str(line_sp[0]).strip().upper()
label_real = "NAN" if label_org=="" else label_org
label_set.add(label_real)
file_csv.close()
return label_set, len_all
def preprocess_label_ques_to_idx(self, embedding_type, batch_size, path, embed, rate=1):
label_set, len_all = self.preprocess_get_label_set(path)
# Build the label-to-index mappings; if label2index already exists, reuse it (this is the case when processing the dev set)
if not os.path.exists(path_fast_text_model_l2i_i2l):
count = 0
label2index = {}
index2label = {}
for label_one in label_set:
label2index[label_one] = count
index2label[count] = label_one
count = count + 1
l2i_i2l = {}
l2i_i2l['l2i'] = label2index
l2i_i2l['i2l'] = index2label
save_json(l2i_i2l, path_fast_text_model_l2i_i2l)
else:
l2i_i2l = load_json(path_fast_text_model_l2i_i2l)
# fraction of the data to read
len_ql = int(rate * len_all)
if len_ql <= 500:  # when only a small sample would remain, fall back to the full corpus so there is enough data to train on
len_ql = len_all
def process_line(line):
# process a single line: extract the label and the question indices
line_sp = line.split(",")
ques = str(line_sp[1]).strip().upper()
label = str(line_sp[0]).strip().upper()
label = "NAN" if label == "" else label
que_embed = embed.sentence2idx(ques)
label_zeros = [0] * len(l2i_i2l['l2i'])
label_zeros[l2i_i2l['l2i'][label]] = 1
return que_embed, label_zeros
while True:
file_csv = open(path, "r", encoding="utf-8")
cout_all_line = 0
cnt = 0
x, y = [], []
# break out of the loop
if len_ql < cout_all_line:
break
for line in file_csv:
cout_all_line += 1
if cout_all_line > 1:  # the first line is the 'label,ques' header, so skip it
x_line, y_line = process_line(line)
x.append(x_line)
y.append(y_line)
cnt += 1
if cnt == batch_size:
if embedding_type in ['bert', 'albert']:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
x_all = [x_1, x_2]
elif embedding_type == 'xlnet':
x_, y_ = x, np.array(y)
x_1 = np.array([x[0][0] for x in x_])
x_2 = np.array([x[1][0] for x in x_])
x_3 = np.array([x[2][0] for x in x_])
x_all = [x_1, x_2, x_3]
else:
x_all, y_ = np.array(x), np.array(y)
cnt = 0
yield (x_all, y_)
x, y =[], []
file_csv.close()
print("preprocess_label_ques_to_idx ok")
| 36.781022
| 92
| 0.523318
| 646
| 5,039
| 3.773994
| 0.218266
| 0.049221
| 0.054143
| 0.076702
| 0.370796
| 0.322395
| 0.30886
| 0.248975
| 0.226005
| 0.211649
| 0
| 0.039798
| 0.371701
| 5,039
| 136
| 93
| 37.051471
| 0.730259
| 0.059536
| 0
| 0.219048
| 0
| 0
| 0.040825
| 0.019562
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.038095
| 0
| 0.142857
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b0df7f5ef294a68858d836af143c289d120187
| 4,375
|
py
|
Python
|
Object_detection_image.py
|
hiperus0988/pyao
|
72c56975a3d45aa033bdf7650b5369d59240395f
|
[
"Apache-2.0"
] | 1
|
2021-06-09T22:17:57.000Z
|
2021-06-09T22:17:57.000Z
|
Object_detection_image.py
|
hiperus0988/pyao
|
72c56975a3d45aa033bdf7650b5369d59240395f
|
[
"Apache-2.0"
] | null | null | null |
Object_detection_image.py
|
hiperus0988/pyao
|
72c56975a3d45aa033bdf7650b5369d59240395f
|
[
"Apache-2.0"
] | null | null | null |
######## Image Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/15/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on an image.
# It draws boxes and scores around the objects of interest in the image.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
IMAGE_NAME = 'test1.jpg'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 6
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)
# Press any key to close the image
cv2.waitKey(0)
# Clean up
cv2.destroyAllWindows()
| 36.458333
| 122
| 0.779886
| 680
| 4,375
| 4.836765
| 0.377941
| 0.045607
| 0.025844
| 0.034965
| 0.082396
| 0.06689
| 0.034661
| 0
| 0
| 0
| 0
| 0.007451
| 0.141029
| 4,375
| 119
| 123
| 36.764706
| 0.867749
| 0.511314
| 0
| 0
| 0
| 0
| 0.083977
| 0.012066
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b42bca24804913cf6908775c04bc29a0bec6df
| 1,469
|
py
|
Python
|
model/contact.py
|
hubogeri/python_training
|
7a918040e4c8bae5a031134911bc8b465f322699
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
hubogeri/python_training
|
7a918040e4c8bae5a031134911bc8b465f322699
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
hubogeri/python_training
|
7a918040e4c8bae5a031134911bc8b465f322699
|
[
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class Contact:
def __init__(self, fname=None, mname=None, lname=None, nick=None, title=None, comp=None, addr=None,
home=None, mobile=None, work=None, fax=None, email1=None, email2=None, email3=None,
homepage=None, bday=None, bmonth=None, byear=None, aday=None, amonth=None, ayear=None,
secaddr=None, secphone=None, note=None, id =None):
self.fname = fname
self.mname = mname
self.lname = lname
self.nick = nick
self.title = title
self.comp = comp
self.addr = addr
self.home = home
self.mobile = mobile
self.work = work
self.fax = fax
self.email1 = email1
self.email2 = email2
self.email3 = email3
self.homepage = homepage
self.bday = bday
self.bmonth = bmonth
self.byear = byear
self.aday = aday
self.amonth = amonth
self.ayear = ayear
self.secaddr = secaddr
self.secphone = secphone
self.note = note
self.id = id
def __repr__(self):
return "%s:%s:%s" % (self.id, self.fname, self.lname)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.fname == other.fname and self.lname == other.lname
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
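A tiny usage sketch, not part of the original file, showing the intended role of id_or_max when sorting contacts that may lack a database id:
contacts = [Contact(fname="Ann", id="2"), Contact(fname="Bo"), Contact(fname="Cy", id="1")]
print(sorted(contacts, key=Contact.id_or_max))   # contacts without an id sort last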
| 30.604167
| 135
| 0.571137
| 191
| 1,469
| 4.319372
| 0.246073
| 0.043636
| 0.019394
| 0.024242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009091
| 0.326072
| 1,469
| 47
| 136
| 31.255319
| 0.824242
| 0
| 0
| 0
| 0
| 0
| 0.00545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.025
| 0.05
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b4cabd9071c90b544409b5b87e3302450b1278
| 11,342
|
py
|
Python
|
test/IECore/BasicPreset.py
|
ericmehl/cortex
|
054839cc709ce153d1bcaaefe7f340ebe641ec82
|
[
"BSD-3-Clause"
] | 386
|
2015-01-02T11:10:43.000Z
|
2022-03-10T15:12:20.000Z
|
test/IECore/BasicPreset.py
|
ericmehl/cortex
|
054839cc709ce153d1bcaaefe7f340ebe641ec82
|
[
"BSD-3-Clause"
] | 484
|
2015-01-09T18:28:06.000Z
|
2022-03-31T16:02:04.000Z
|
test/IECore/BasicPreset.py
|
ericmehl/cortex
|
054839cc709ce153d1bcaaefe7f340ebe641ec82
|
[
"BSD-3-Clause"
] | 99
|
2015-01-28T23:18:04.000Z
|
2022-03-27T00:59:39.000Z
|
##########################################################################
#
# Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import sys
import shutil
import unittest
import IECore
class TestBasicPreset( unittest.TestCase ) :
def testCopy( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
p = IECore.BasicPreset( testObj, testObj.parameters() )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
p2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testLoad( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetLoadTest", "basicPresetLoadTest-1.cob" ) )
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
def testSave( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
# Save for the classLoader and check its there, we test the 'loadability' later...
preset.save( savePath, "basicPresetTest" )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.cob" ) ) )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.py" ) ) )
# save without the classLoader and check its there
preset.save( savePath, "basicPresetTest", classLoadable=False )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest.cob" ) ) )
# reload
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest.cob" ) )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
preset2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
preset2.save( savePath, "basicPresetTest2", classLoadable=False )
#reload
p2 = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest2.cob" ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testClassLoader( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
preset.save( savePath, "basicPresetTestClassLoader" )
# make sure that no messages are emitted during loading
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
loader = IECore.ClassLoader( IECore.SearchPath( savePath ) )
p = loader.load( "basicPresetTestClassLoader" )()
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( isinstance( p, IECore.BasicPreset ) )
p.metadata()
def testClasses( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassParameter( "b", "", "IECORE_OP_PATHS", os.path.join( "maths", "multiply" ), 2 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertNotEqual( classes1[1:], classes2[1:] )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertEqual( classes1[1:], classes2[1:] )
def testClassVectors( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassVectorParameter( "b", "", "IECORE_OP_PATHS" ),
]
)
testObj.parameters()["b"].setClasses(
[
( "mult", os.path.join( "maths", "multiply" ), 2 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassVectorParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertNotEqual( classes1, classes2 )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertEqual( classes1, classes2 )
def testCompoundVectorParameter( self ) :
p = IECore.Parameterised( "test" )
p.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.CompoundVectorParameter(
"c",
"",
members = [
IECore.StringVectorParameter( "s", "", IECore.StringVectorData() ),
IECore.BoolVectorParameter( "b", "", IECore.BoolVectorData() ),
]
)
]
)
p["c"]["s"].setValue( IECore.StringVectorData( [ "1", "2", "3" ] ) )
p["c"]["b"].setValue( IECore.BoolVectorData( [ True, False, True ] ) )
v = p.parameters().getValue().copy()
preset = IECore.BasicPreset( p, p.parameters() )
self.assertTrue( preset.applicableTo( p, p.parameters() ) )
p.parameters().setValue( p.parameters().defaultValue )
self.assertNotEqual( p.parameters().getValue(), v )
preset( p, p.parameters() )
self.assertEqual( p.parameters().getValue(), v )
def tearDown( self ) :
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
paths = (
os.path.join( savePath, "basicPresetTest" ),
os.path.join( savePath, "basicPresetTest.cob" ),
os.path.join( savePath, "basicPresetTest2.cob" ),
os.path.join( savePath, "basicPresetTestClassLoader" ),
)
for p in paths :
if os.path.isdir( p ) :
shutil.rmtree( p )
elif os.path.isfile( p ) :
os.remove( p )
if __name__ == "__main__":
unittest.main()
| 33.655786
| 107
| 0.677923
| 1,199
| 11,342
| 6.381985
| 0.202669
| 0.102196
| 0.059592
| 0.040251
| 0.657998
| 0.625457
| 0.58965
| 0.570047
| 0.533717
| 0.515682
| 0
| 0.014208
| 0.162229
| 11,342
| 336
| 108
| 33.755952
| 0.791097
| 0.156145
| 0
| 0.46729
| 0
| 0
| 0.085242
| 0.013213
| 0
| 0
| 0
| 0
| 0.182243
| 1
| 0.037383
| false
| 0
| 0.028037
| 0
| 0.070093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b4da54ad6bdf7efb1efb5b210a443bc83b0db4
| 12,492
|
py
|
Python
|
rlpy/Domains/Pacman.py
|
imanolarrieta/RL
|
072a8c328652f45e053baecd640f04adf7f84b49
|
[
"BSD-3-Clause"
] | 1
|
2019-12-07T13:47:43.000Z
|
2019-12-07T13:47:43.000Z
|
rlpy/Domains/Pacman.py
|
imanolarrieta/RL
|
072a8c328652f45e053baecd640f04adf7f84b49
|
[
"BSD-3-Clause"
] | null | null | null |
rlpy/Domains/Pacman.py
|
imanolarrieta/RL
|
072a8c328652f45e053baecd640f04adf7f84b49
|
[
"BSD-3-Clause"
] | null | null | null |
"""Pacman game domain."""
from rlpy.Tools import __rlpy_location__
from .Domain import Domain
from .PacmanPackage import layout, pacman, game, ghostAgents
from .PacmanPackage import graphicsDisplay
import numpy as np
from copy import deepcopy
import os
import time
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Austin Hays"
class Pacman(Domain):
"""
Pacman domain, which acts as a wrapper for the Pacman implementation
from the BerkeleyX/CS188.1x course project 3.
**STATE:** The state vector has a series of dimensions:
* [2] The x and y coordinates of pacman
* [3 * ng] the x and y coordinates as well as the scare time of each ghost
("scare time" is how long the ghost remains scared after consuming a capsule.)
* [nf] binary variables indicating if a food is still on the board or not
* [nc] binary variables for each capsule indicating if it is still on the board or not
*nf* and *nc* are map-dependent, and *ng* can be set as a parameter.
Based on above, total dimensionality of state vector is map-dependent,
and given by (2 + 3*ng + nf + nc).
**ACTIONS:** Move Pacman [up, down, left, right, stay]
**REWARD:** See the Berkeley project website below for more info.
.. note::
The visualization runs as fast as your CPU will permit; to slow things
down so gameplay is actually visible, de-comment time.sleep()
in the showDomain() method.
**REFERENCE:** This domain is an RLPy wrapper for the implementation
from the `BerkeleyX/CS188.1x course project 3 <https://courses.edx.org/courses/BerkeleyX/CS188.1x/2013_Spring/courseware/Week_9/Project_3_Reinforcement/>`_
See the original `source code (zipped) <https://courses.edx.org/static/content-berkeley-cs188x~2013_Spring/projects/reinforcement/reinforcement.zip>`_
For more details of the domain see the original package in the `Domains/PacmanPackage` folder.
"""
_max_scared_time = 39
actions = ["Stop", "North", "East", "South", "West"]
actions_num = 5
episodeCap = 1000
#: location of layouts shipped with rlpy
default_layout_dir = os.path.join(
__rlpy_location__, "Domains", "PacmanPackage",
"layouts")
def __init__(self, noise=.1, timeout=30,
layoutFile=os.path.join(
default_layout_dir, 'trickyClassic.lay'),
numGhostAgents=1000):
"""
layoutFile:
filename of the map file
noise:
with this probability pacman makes a random move instead the one
specified by the action
"""
self.noise = noise
# Specifies which Pacman world you want
self.layoutFile = layoutFile
# Puts the file in line stripped format
layout_file_content = self._tryToLoad(self.layoutFile)
self.layout = layout.Layout(layout_file_content)
# Number of ghosts
self.numGhostAgents = numGhostAgents
# Intitializes Pacman game
self.game_state = pacman.GameState()
self.game_rules = pacman.ClassicGameRules(timeout)
self.layout_copy = deepcopy(self.layout)
self.game_state.data.initialize(self.layout_copy, self.numGhostAgents)
self.num_total_food = len(self.layout_copy.food.asList())
self.num_total_capsules = len(self.layout_copy.capsules)
self._defaultSettings()
self.restartGraphics = None
self.timerswitch = False
self.savedtimer = None
self.gameDisplay = None
self._set_statespace_limits()
super(Pacman, self).__init__()
def _set_statespace_limits(self):
# Makes an array of limits for each dimension in the state vector.
statespace_limits = []
# adds pacman x, y locations
statespace_limits.append([1, self.layout.width - 2])
statespace_limits.append([1, self.layout.height - 2])
# adds ghost x, y locations and scaredTimer (how long they can be
# eaten)
for ghost in self.game_state.data.agentStates[1:]:
statespace_limits.append([1, self.layout.width - 2])
statespace_limits.append([1, self.layout.height - 2])
statespace_limits.append([0, self._max_scared_time])
statespace_limits += [[0, 1]] * (
self.num_total_food + self.num_total_capsules)
self.statespace_limits = np.array(statespace_limits, dtype="float")
def _set_state(self, s):
"""
Takes a vector s and sets the internal game state used by the original
pacman package.
"""
# copies most recent state
data = self.game_state.data
agent_states = data.agentStates
# set pacman position
agent_states[0].configuration.pos = (s[0], s[1])
# set ghost position
num_ghosts = len(agent_states) - 1
for i in range(1, num_ghosts + 1):
part_s = s[(3 * i) - 1:(3 * i) + 2]
agent_states[i].configuration.pos = (part_s[0], part_s[1])
agent_states[i].scaredTimer = part_s[2]
# set food and capsules locations
s_food = s[(num_ghosts + 1) * 3:]
x = 0
y = 0
i = 0
data.capsules = []
for char in str(self.layout_copy):
if char == ".":
data.food[x][y] = bool(s_food[i])
i += 1
elif char == "o":
coord = (x, self.layout_copy.height - y)
if s_food[i]:
data.capsules.append(coord)
i += 1
elif char == "\n":
y += 1
x = -1
x += 1
def _get_state(self):
"""
get the internal game state represented as a numpy array
"""
data = self.game_state.data
agent_states = self.game_state.data.agentStates
num_ghosts = len(agent_states) - 1
s = np.zeros(
2 + num_ghosts * 3 + self.num_total_food + self.num_total_capsules)
# get pacman position
s[:2] = agent_states[0].configuration.pos
# import ipdb; ipdb.set_trace()
# get ghost info
for i in range(num_ghosts):
s[2 + i * 3: 2 + i * 3 + 2] = agent_states[i + 1].configuration.pos
s[2 + i * 3 + 2] = agent_states[i + 1].scaredTimer
# get food and capsules status
i = 2 + num_ghosts * 3
x = 0
y = 0
for char in str(self.layout_copy):
if char == ".":
s[i] = data.food[x][y]
i += 1
elif char == "\n":
y += 1
x = -1
elif char == "o":
coord = (x, self.layout_copy.height - y)
if coord in data.capsules:
s[i] = 1.
i += 1
x += 1
return s
state = property(_get_state, _set_state)
def showDomain(self, a, s=None):
if s is not None:
errStr = 'ERROR: In Pacman.py, attempted to pass a state (s) '\
'to showDomain(); Pacman only supports internal states. '\
'If you do pass a state parameter, ensure it is set to None.'
raise Exception(errStr)
s = self.game_state
if self.gameDisplay is None:
self.gameDisplay = graphicsDisplay.PacmanGraphics()
self.gameDisplay.startGraphics(self)
self.gameDisplay.drawStaticObjects(s.data)
self.gameDisplay.drawAgentObjects(s.data)
elif self._cleanup_graphics:
self._cleanup_graphics = False
self.gameDisplay.removeAllFood()
self.gameDisplay.removeAllCapsules()
self.gameDisplay.food = self.gameDisplay.drawFood(
self.gameDisplay.layout.food)
self.gameDisplay.capsules = self.gameDisplay.drawCapsules(
self.gameDisplay.layout.capsules)
# converts s vector in pacman gamestate instance and updates
# the display every time pacman or a ghost moves.
# s.data.food is the correct food matrix
s.data.layout.food = s.data.food
for agent in range(len(s.data.agentStates)):
s.data._agentMoved = agent
self.gameDisplay.update(s.data)
s._foodEaten = None
s._capsuleEaten = None
# time.sleep(0.1) # Sleep for 0.1 sec
def step(self, a):
"""
Applies actions from outside the Pacman domain to the given state.
Internal states accounted for along with scoring and terminal checking.
Returns a tuple of form (reward, new state vector, terminal)
"""
if self.random_state.random_sample() < self.noise:
# Random Move
a = self.random_state.choice(self.possibleActions())
a = self.actions[a]
next_state_p = self.game_state.generateSuccessor(0, a)
next_state = next_state_p
# pacman performs action "a" in current state object
# pacman.PacmanRules.applyAction(self.game_state, a)
# pacman.GhostRules.checkDeath(self.game_state, 0)
# the ghosts move randomly
for i in range(1, len(self.game_state.data.agentStates)):
if next_state.isWin() or next_state.isLose():
break
ghostOptions = pacman.GhostRules.getLegalActions(next_state, i)
# TODO: use domain random stream
randomAction_ind = self.random_state.randint(len(ghostOptions))
randomAction = ghostOptions[randomAction_ind]
next_state = next_state.generateSuccessor(i, randomAction)
# keep track of eaten stuff for graphics (original code assumes
# graphics are updated after every agent's move)
next_state.data._foodEaten = next_state_p.data._foodEaten
next_state.data._capsuleEaten = next_state_p.data._capsuleEaten
# scoring in pacman
r = next_state.data.score - self.game_state.data.score
self.game_state = next_state
terminal = self.isTerminal()
return r, self._get_state(), terminal, self.possibleActions()
def s0(self):
"""
re-initializes internal states when an episode starts; returns the initial state vector, terminal flag, and possible actions
"""
self.game_state = pacman.GameState()
self.game_rules = pacman.ClassicGameRules(timeout=30)
self.layout_copy = deepcopy(self.layout)
self.game = self.game_rules.newGame(
self.layout_copy, pacman, self.ghosts, DummyGraphics(), self.beQuiet, catchExceptions=False)
self.game_state.data.initialize(self.layout_copy, self.numGhostAgents)
self._cleanup_graphics = True
return self.state, self.isTerminal(), self.possibleActions()
def possibleActions(self):
if self.isTerminal():
# somewhat hacky, but should not matter anyway, maybe clean up in
# the future
return np.array([0])
# makes an array of possible actions pacman can perform at any given
# state
possibleActions = []
possibleMoves = pacman.GameState.getLegalActions(
self.game_state, agentIndex=0)
for a in possibleMoves:
possibleActions.append(self.actions.index(a))
return np.array(possibleActions)
def isTerminal(self):
"""
Checks whether the game should terminate at the given state.
(Terminate for failure, ie eaten by ghost or out of time, and for
success, all food on map eaten.)
If game should terminate, returns the proper indication to step function.
Accounts for scoring changes in terminal states.
"""
return self.game_state.data._lose or self.game_state.data._win
def _defaultSettings(self):
self.ghostNum = 2
self.ghosts = [ghostAgents.RandomGhost(
game.Agent) for i in range(self.ghostNum)]
self.beQuiet = False
def _tryToLoad(self, fullname):
# used in getLayout function
f = open(fullname)
grid = [line.strip() for line in f]
f.close()
return grid
class DummyGraphics(object):
def initialize(self, *arg, **kwargs):
pass
def update(self, *arg, **kwargs):
pass
def finalize(self, *arg, **kwargs):
pass
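# Illustrative sketch (not part of the original file): the state vector built by
# _get_state() above is laid out as [pacman x, pacman y], then [x, y, scaredTimer]
# per ghost, then one slot per food pellet and per capsule. The counts below are
# made up and serve only to show how the offsets are computed.
example_num_ghosts, example_num_food, example_num_capsules = 2, 10, 4
example_state = np.zeros(2 + example_num_ghosts * 3 + example_num_food + example_num_capsules)
example_food_offset = 2 + example_num_ghosts * 3  # first food slot
example_capsule_offset = example_food_offset + example_num_food  # first capsule slot
print(len(example_state), example_food_offset, example_capsule_offset)  # 22 8 18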
| 38.795031
| 159
| 0.616394
| 1,559
| 12,492
| 4.815266
| 0.264272
| 0.023445
| 0.031171
| 0.022646
| 0.170641
| 0.143866
| 0.130944
| 0.116558
| 0.092847
| 0.066871
| 0
| 0.013697
| 0.292827
| 12,492
| 321
| 160
| 38.915888
| 0.836088
| 0.285383
| 0
| 0.21466
| 0
| 0
| 0.044839
| 0
| 0
| 0
| 0
| 0.003115
| 0
| 1
| 0.073298
| false
| 0.026178
| 0.041885
| 0
| 0.193717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b62ab258f0b51ef25d431f8fa66de9acd438a7
| 1,895
|
py
|
Python
|
setup.py
|
giggslam/python-messengerbot-sdk
|
4a6fadf96fe3425da9abc4726fbb84db6d84f7b5
|
[
"Apache-2.0"
] | 23
|
2019-03-05T08:33:34.000Z
|
2021-12-13T01:52:47.000Z
|
setup.py
|
giggslam/python-messengerbot-sdk
|
4a6fadf96fe3425da9abc4726fbb84db6d84f7b5
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
giggslam/python-messengerbot-sdk
|
4a6fadf96fe3425da9abc4726fbb84db6d84f7b5
|
[
"Apache-2.0"
] | 6
|
2019-03-07T07:58:02.000Z
|
2020-12-18T10:08:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
__version__ = ''
with open('facebookbot/__about__.py', 'r') as fd:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fd:
m = reg.match(line)
if m:
__version__ = m.group(1)
break
def _requirements():
with open('requirements.txt', 'r') as fd:
return [name.strip() for name in fd.readlines()]
with open('README.rst', 'r') as fd:
long_description = fd.read()
setup(
name="fbsdk",
version=__version__,
author="Sam Chang",
author_email="[email protected]",
maintainer="Sam Chang",
maintainer_email="[email protected]",
url="https://github.com/boompieman/fbsdk",
description="Facebook Messaging API SDK for Python",
long_description=long_description,
license='Apache License 2.0',
packages=[
"facebookbot", "facebookbot.models"
],
install_requires=_requirements(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Topic :: Software Development"
]
)
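# Illustrative sketch (not part of the original setup.py): how the __version__
# regex above pulls the version string out of a line. The sample line is made up.
_sample_about_line = "__version__ = '1.2.3'"
_sample_match = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]').match(_sample_about_line)
assert _sample_match is not None and _sample_match.group(1) == '1.2.3'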
| 30.079365
| 76
| 0.663852
| 238
| 1,895
| 5.168067
| 0.57563
| 0.04878
| 0.012195
| 0.026016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020175
| 0.215303
| 1,895
| 62
| 77
| 30.564516
| 0.806994
| 0.302902
| 0
| 0
| 0
| 0
| 0.343272
| 0.050459
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.1
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b76c6f6bd2bcb1986a9d9701e4ee097a1ff3bf
| 18,905
|
py
|
Python
|
src/transformers/models/mmbt/modeling_mmbt.py
|
MaximovaIrina/transformers
|
033c3ed95a14b58f5a657f5124bc5988e4109c9f
|
[
"Apache-2.0"
] | 1
|
2022-01-12T11:39:47.000Z
|
2022-01-12T11:39:47.000Z
|
src/transformers/models/mmbt/modeling_mmbt.py
|
AugustVIII/transformers
|
185876392c0dcd4c4bb02f2750822144a3bee545
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/mmbt/modeling_mmbt.py
|
AugustVIII/transformers
|
185876392c0dcd4c4bb02f2750822144a3bee545
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MMBT model. """
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
from ...modeling_utils import ModuleUtilsMixin
from ...utils import logging
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MMBTConfig"
class ModalEmbeddings(nn.Module):
"""Generic Modal Embeddings which takes in an encoder, and a transformer embedding."""
def __init__(self, config, encoder, embeddings):
super().__init__()
self.config = config
self.encoder = encoder
self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
self.position_embeddings = embeddings.position_embeddings
self.token_type_embeddings = embeddings.token_type_embeddings
self.word_embeddings = embeddings.word_embeddings
self.LayerNorm = embeddings.LayerNorm
self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
token_embeddings = self.proj_embeddings(self.encoder(input_modal))
seq_length = token_embeddings.size(1)
if start_token is not None:
start_token_embeds = self.word_embeddings(start_token)
seq_length += 1
token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
if end_token is not None:
end_token_embeds = self.word_embeddings(end_token)
seq_length += 1
token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
if token_type_ids is None:
token_type_ids = torch.zeros(
(input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device
)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = token_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
MMBT_START_DOCSTRING = r"""
MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and Text](https://github.com/facebookresearch/mmbt) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and
obtains state-of-the-art performance on various multimodal classification benchmark tasks.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config ([`MMBTConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration.
transformer (:class: *~nn.Module*): A text transformer that is used by MMBT.
It should have embeddings, encoder, and pooler attributes.
encoder (:class: *~nn.Module*): Encoder for the second modality.
It should take in a batch of modal inputs and return k, n dimension embeddings.
"""
MMBT_INPUTS_DOCSTRING = r"""
Args:
input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`):
The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image
Encoder, the shape would be (batch_size, channels, height, width)
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's
appended to the end of other modality embeddings. Indices can be obtained using
[`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification
tasks.
modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used.
attention_mask (*optional*) `torch.FloatTensor` of shape `(batch_size, sequence_length)`:
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, sequence_length)`:
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
modal_token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, modal_sequence_length)`:
Segment token indices to indicate different portions of the non-text modality. The embeddings from these
tokens will be summed with the respective token embeddings for the non-text modality.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MMBT Model outputting raw hidden-states without any specific head on top.",
MMBT_START_DOCSTRING,
)
class MMBTModel(nn.Module, ModuleUtilsMixin):
def __init__(self, config, transformer, encoder):
super().__init__()
self.config = config
self.transformer = transformer
self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
@add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_modal,
input_ids=None,
modal_start_tokens=None,
modal_end_tokens=None,
attention_mask=None,
token_type_ids=None,
modal_token_type_ids=None,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Examples::
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
mmbt = MMBTModel(config, transformer, encoder)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_txt_shape = input_ids.size()
elif inputs_embeds is not None:
input_txt_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
modal_embeddings = self.modal_encoder(
input_modal,
start_token=modal_start_tokens,
end_token=modal_end_tokens,
position_ids=modal_position_ids,
token_type_ids=modal_token_type_ids,
)
input_modal_shape = modal_embeddings.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
txt_embeddings = self.transformer.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
input_shape = embedding_output.size()[:-1]
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
else:
attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1
)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
else:
encoder_attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1
)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, self.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.transformer.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.transformer.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@add_start_docstrings(
"""
MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
""",
MMBT_START_DOCSTRING,
MMBT_INPUTS_DOCSTRING,
)
class MMBTForClassification(nn.Module):
r"""
**labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**:
(*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or
regression if config.num_labels==1) loss. **logits**: `torch.FloatTensor` of shape `(batch_size, config.num_labels)` Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for
the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`:
Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**:
(*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape
`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used
to compute the weighted average in the self-attention heads.
Examples:
```python
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
model = MMBTForClassification(config, transformer, encoder)
outputs = model(input_modal, input_ids, labels=labels)
loss, logits = outputs[:2]
```"""
def __init__(self, config, transformer, encoder):
super().__init__()
self.config = config
self.num_labels = config.num_labels
self.mmbt = MMBTModel(config, transformer, encoder)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(
self,
input_modal,
input_ids=None,
modal_start_tokens=None,
modal_end_tokens=None,
attention_mask=None,
token_type_ids=None,
modal_token_type_ids=None,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mmbt(
input_modal=input_modal,
input_ids=input_ids,
modal_start_tokens=modal_start_tokens,
modal_end_tokens=modal_end_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
modal_token_type_ids=modal_token_type_ids,
position_ids=position_ids,
modal_position_ids=modal_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
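# Illustrative sketch (not part of the original module): the core step in
# MMBTModel.forward above is to prepend the projected modal embeddings to the
# text embeddings and to pad the attention mask with ones for those extra
# positions. A minimal stand-alone version with made-up shapes:
_batch, _modal_len, _txt_len, _hidden = 2, 5, 7, 16
_modal_embeddings = torch.randn(_batch, _modal_len, _hidden)
_txt_embeddings = torch.randn(_batch, _txt_len, _hidden)
_attention_mask = torch.ones(_batch, _txt_len, dtype=torch.long)
_embedding_output = torch.cat([_modal_embeddings, _txt_embeddings], dim=1)
_attention_mask = torch.cat(
    [torch.ones(_batch, _modal_len, dtype=torch.long), _attention_mask], dim=1
)
assert _embedding_output.shape == (_batch, _modal_len + _txt_len, _hidden)
assert _attention_mask.shape == (_batch, _modal_len + _txt_len)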
| 46.794554
| 209
| 0.685533
| 2,371
| 18,905
| 5.255588
| 0.169127
| 0.019501
| 0.021186
| 0.020544
| 0.351416
| 0.306958
| 0.25953
| 0.232084
| 0.172298
| 0.137389
| 0
| 0.004011
| 0.23507
| 18,905
| 403
| 210
| 46.91067
| 0.857686
| 0.139646
| 0
| 0.229167
| 0
| 0.03125
| 0.414009
| 0.024936
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0.003472
| 0.024306
| 0.003472
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b8347698a1fe18b6d9ec66f6bfbfa77f2567be
| 1,566
|
py
|
Python
|
using_paramiko.py
|
allupramodreddy/cisco_py
|
5488b56d9324011860b78998e694dcce6da5e3d1
|
[
"Apache-2.0"
] | null | null | null |
using_paramiko.py
|
allupramodreddy/cisco_py
|
5488b56d9324011860b78998e694dcce6da5e3d1
|
[
"Apache-2.0"
] | null | null | null |
using_paramiko.py
|
allupramodreddy/cisco_py
|
5488b56d9324011860b78998e694dcce6da5e3d1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python3
import paramiko,time
#using as SSH Client
client = paramiko.SSHClient()
# check dir(client) to find available options.
# auto adjust host key verification with yes or no
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# time for connecting to remote Cisco IOS
"""
Manually taking input
addr = input('Provide IP address to connect to: ')
user = input('Username: ')
pwd = getpass.getpass('Password: ')"""
# Taking input from files
f1 = open("devices.txt","r")
f2 = open("commands.txt","r")
for line in f1:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
data = line.split(" ")
# print(data)
addr = data[0]
user = data[1]
pwd = data[2]
f3 = open(addr+".txt","w+")
# print(addr +" "+ user +" " +pwd)
client.connect(addr,username=user,password=pwd,allow_agent=False,look_for_keys=False)
# we have to ask for Shell
device_access = client.invoke_shell()
for line in f2:
device_access.send(line)
time.sleep(1)
output = device_access.recv(55000).decode('ascii')
f3.write(output)
"""
THIS CODE IS FOR SINGLE COMMAND, FOR MULTIPLE COMMANDS CODE BELOW
# send command to the device
device_access.send("ter len 0\nshow run \n")
time.sleep(2)
# receive output from the device, convert it to byte-like format and print it
print(device_access.recv(550000).decode('ascii'))
# We can print the same to a file too
with open("csr1000v.txt","w") as f:
f.write(device_access.recv(550000).decode('ascii'))"""
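# Illustrative sketch (not part of the original script): the loop above assumes
# each line of devices.txt holds "address username password" separated by single
# spaces. The sample line below is made up.
sample_line = "192.0.2.10 admin secret"
sample_addr, sample_user, sample_pwd = sample_line.split(" ")
print(sample_addr, sample_user, sample_pwd)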
| 23.727273
| 89
| 0.691571
| 234
| 1,566
| 4.551282
| 0.495727
| 0.067606
| 0.04507
| 0.037559
| 0.155869
| 0.155869
| 0.093897
| 0.093897
| 0
| 0
| 0
| 0.026377
| 0.176884
| 1,566
| 66
| 90
| 23.727273
| 0.799845
| 0.17433
| 0
| 0.2
| 0
| 0
| 0.051893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.05
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b86cc42aaff67200ff3f4f5f6d27121835fd8c
| 733
|
py
|
Python
|
old/.history/a_20201125192943.py
|
pscly/bisai1
|
e619186cec5053a8e02bd59e48fc3ad3af47d19a
|
[
"MulanPSL-1.0"
] | null | null | null |
old/.history/a_20201125192943.py
|
pscly/bisai1
|
e619186cec5053a8e02bd59e48fc3ad3af47d19a
|
[
"MulanPSL-1.0"
] | null | null | null |
old/.history/a_20201125192943.py
|
pscly/bisai1
|
e619186cec5053a8e02bd59e48fc3ad3af47d19a
|
[
"MulanPSL-1.0"
] | null | null | null |
# for n in range(400,500):
# i = n // 100
# j = n // 10 % 10
# k = n % 10
# if n == i ** 3 + j ** 3 + k ** 3:
# print(n)
# Problem 1 (16)
# input("Please enter (first time): ")
# s1 = input("Please enter (second time): ")
# l1 = s1.split(' ')
# l2 = []
# for i in l1:
# if i.isdigit():
# l2.append(int(i))
# for i in l2:
# if not (i % 6):
# print(i, end=" ")
# Problem 2 (17)
out_l1 = []
def bian_int_list(l1):
re_l1 = []  # the list to be returned
for i in l1:
re_l1.append(i)
def jisuan(str_num):
he1 = 0
global out_l1
for i in l1:
he1 += int(i)**2
if he1 > int(str_num):
out_l1.append(str_num)
return None
while 1:
in_1 = input("请输入数值:")
nums_l1 = in_1.split(' ')
| 13.089286
| 39
| 0.452933
| 121
| 733
| 2.636364
| 0.421488
| 0.050157
| 0.075235
| 0.075235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099576
| 0.356071
| 733
| 55
| 40
| 13.327273
| 0.576271
| 0.472033
| 0
| 0
| 0
| 0
| 0.019718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b92da15285253454115ccfc5647355f3c2b100
| 345
|
py
|
Python
|
yzcore/templates/project_template/src/const/_job.py
|
lixuemin13/yz-core
|
82774f807ac1002b77d0cc90f6695b1cc6ba0820
|
[
"MIT"
] | 6
|
2021-01-26T10:27:04.000Z
|
2022-03-19T16:13:12.000Z
|
yzcore/templates/project_template/src/const/_job.py
|
lixuemin13/yz-core
|
82774f807ac1002b77d0cc90f6695b1cc6ba0820
|
[
"MIT"
] | null | null | null |
yzcore/templates/project_template/src/const/_job.py
|
lixuemin13/yz-core
|
82774f807ac1002b77d0cc90f6695b1cc6ba0820
|
[
"MIT"
] | 2
|
2021-07-27T04:11:51.000Z
|
2022-01-06T09:36:06.000Z
|
#!/usr/bin/python3.6.8+
# -*- coding:utf-8 -*-
"""
@auth: cml
@date: 2020-12-2
@desc: ...
"""
class JobStatus(object):
PENDING = 0  # task waiting to be executed
STARTED = 100  # task execution started
PROCESS = 110
POLLING = 120
CALLBACK = 130
SUCCESS = 200  # task executed successfully
RETRY = 300  # task will be retried
FAILURE = 400  # task execution failed
REVOKED = 500  # task revoked
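# Illustrative sketch (not part of the original file): a reverse lookup from the
# numeric codes above to their names, e.g. for logging. STATUS_NAMES is a new,
# hypothetical helper, not part of the original module.
STATUS_NAMES = {
    value: name
    for name, value in vars(JobStatus).items()
    if not name.startswith("_")
}
assert STATUS_NAMES[200] == "SUCCESS"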
| 15
| 28
| 0.53913
| 42
| 345
| 4.428571
| 0.97619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151261
| 0.310145
| 345
| 22
| 29
| 15.681818
| 0.630252
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9b95364464c7d47db46ee15f7524a804b79ea1b
| 10,311
|
py
|
Python
|
pyboleto/html.py
|
RenanPalmeira/pyboleto
|
7b12a7a2f7e92cad5f35f843ae67c397b6f7e36e
|
[
"BSD-3-Clause"
] | null | null | null |
pyboleto/html.py
|
RenanPalmeira/pyboleto
|
7b12a7a2f7e92cad5f35f843ae67c397b6f7e36e
|
[
"BSD-3-Clause"
] | null | null | null |
pyboleto/html.py
|
RenanPalmeira/pyboleto
|
7b12a7a2f7e92cad5f35f843ae67c397b6f7e36e
|
[
"BSD-3-Clause"
] | 1
|
2019-03-20T01:01:00.000Z
|
2019-03-20T01:01:00.000Z
|
# -*- coding: utf-8 -*-
"""
pyboleto.html
~~~~~~~~~~~~~
Class responsible for producing the boleto output in HTML.
:copyright: © 2012 by Artur Felipe de Sousa
:license: BSD, see LICENSE for more details.
"""
import os
import string
import sys
import codecs
import base64
from itertools import chain
if sys.version_info < (3,):
from itertools import izip_longest as zip_longest
zip_longest  # referenced to avoid an unused-name error for zip_longest
else:
from itertools import zip_longest
DIGITS = [
['n', 'n', 'w', 'w', 'n'],
['w', 'n', 'n', 'n', 'w'],
['n', 'w', 'n', 'n', 'w'],
['w', 'w', 'n', 'n', 'n'],
['n', 'n', 'w', 'n', 'w'],
['w', 'n', 'w', 'n', 'n'],
['n', 'w', 'w', 'n', 'n'],
['n', 'n', 'n', 'w', 'w'],
['w', 'n', 'n', 'w', 'n'],
['n', 'w', 'n', 'w', 'n'],
]
class BoletoHTML(object):
"""Geração do Boleto em HTML
Esta classe é responsável por imprimir o boleto em HTML.
Outras classes podem ser implementadas no futuro com a mesma interface,
para fazer output em LaTeX, etc ...
Esta classe pode imprimir boletos em formato de carnê (2 boletos por
página) ou em formato de folha cheia.
:param file_descr: Um arquivo ou *file-like* class.
:param landscape: Formato da folha. Usar ``True`` para boleto
tipo carnê.
"""
def __init__(self, file_descr, landscape=False):
# Sizes in px
self.width = 750
self.widthCanhoto = 0
self.fontSizeTitle = 9
self.heightLine = 27
self.fontSizeValue = 12
self.title = 'Boleto bancário'
self.fileDescr = file_descr
if landscape:
raise NotImplementedError('Em desenvolvimento...')
else:
tpl = string.Template(self._load_template('head.html'))
self.html = tpl.substitute(title=self.title, width=self.width,
font_size_value=self.fontSizeValue,
height_line=self.heightLine,
font_size_title=self.fontSizeTitle)
def _load_template(self, template):
pyboleto_dir = os.path.dirname(os.path.abspath(__file__))
template_path = os.path.join(pyboleto_dir, 'templates', template)
with open(template_path, 'r') as tpl:
template_content = tpl.read()
return template_content
def _load_image(self, logo_image):
pyboleto_dir = os.path.dirname(os.path.abspath(__file__))
image_path = os.path.join(pyboleto_dir, 'media', logo_image)
return image_path
def _drawReciboSacado(self, boletoDados):
"""Imprime o Recibo do Sacado para modelo de página inteira
:param boletoDados: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:type boletoDados: :class:`pyboleto.data.BoletoData`
"""
tpl = string.Template(self._load_template('recibo_sacado.html'))
tpl_data = {}
# Header
tpl_data['logo_img'] = ''
if boletoDados.logo_image:
img = codecs.open(self._load_image(boletoDados.logo_image))
aux = img.read()
aux = base64.b64encode(aux)
img_base64 = 'data:image/jpeg;base64,{0}'.format(aux)
tpl_data['logo_img'] = img_base64
tpl_data['codigo_dv_banco'] = boletoDados.codigo_dv_banco
# Body
tpl_data['cedente'] = boletoDados.cedente
tpl_data['agencia_conta_cedente'] = boletoDados.agencia_conta_cedente
tpl_data['cedente_documento'] = boletoDados.cedente_documento
data_vencimento = boletoDados.data_vencimento
tpl_data['data_vencimento'] = data_vencimento.strftime('%d/%m/%Y')
tpl_data['sacado'] = boletoDados.sacado[0]
tpl_data['nosso_numero_format'] = boletoDados.format_nosso_numero()
tpl_data['numero_documento'] = boletoDados.numero_documento
data_documento = boletoDados.data_documento
tpl_data['data_documento'] = data_documento.strftime('%d/%m/%Y')
tpl_data['cedente_endereco'] = boletoDados.cedente_endereco
valor_doc = self._formataValorParaExibir(boletoDados.valor_documento)
tpl_data['valor_documento'] = valor_doc
# Statement
tpl_data['demonstrativo'] = ''
for dm in boletoDados.demonstrativo:
tpl_data['demonstrativo'] += '<p>{0}</p>'.format(dm)
self.html += tpl.substitute(tpl_data)
def _drawHorizontalCorteLine(self):
self.html += '<hr />'
def _drawReciboCaixa(self, boletoDados):
"""Imprime o Recibo do Caixa
:param boletoDados: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:type boletoDados: :class:`pyboleto.data.BoletoData`
"""
tpl = string.Template(self._load_template('recibo_caixa.html'))
tpl_data = {}
# Header
tpl_data['logo_img'] = ''
if boletoDados.logo_image:
img = codecs.open(self._load_image(boletoDados.logo_image))
aux = img.read()
aux = base64.b64encode(aux)
img_base64 = 'data:image/jpeg;base64,{0}'.format(aux)
tpl_data['logo_img'] = img_base64
tpl_data['codigo_dv_banco'] = boletoDados.codigo_dv_banco
tpl_data['linha_digitavel'] = boletoDados.linha_digitavel
# Body
data_vencimento = boletoDados.data_vencimento
tpl_data['data_vencimento'] = data_vencimento.strftime('%d/%m/%Y')
# value comes as unicode from data.py
if isinstance(boletoDados.local_pagamento, unicode):
tpl_data['local_pagamento'] = boletoDados.local_pagamento.encode('utf-8')
else:
tpl_data['local_pagamento'] = boletoDados.local_pagamento
tpl_data['cedente'] = boletoDados.cedente
tpl_data['agencia_conta_cedente'] = boletoDados.agencia_conta_cedente
data_documento = boletoDados.data_documento
tpl_data['data_documento'] = data_documento.strftime('%d/%m/%Y')
tpl_data['numero_documento'] = boletoDados.numero_documento
tpl_data['especie_documento'] = boletoDados.especie_documento
tpl_data['aceite'] = boletoDados.aceite
data_process = boletoDados.data_processamento
tpl_data['data_processamento'] = data_process.strftime('%d/%m/%Y')
tpl_data['nosso_numero_format'] = boletoDados.format_nosso_numero()
tpl_data['carteira'] = boletoDados.carteira
tpl_data['especie'] = boletoDados.especie
tpl_data['quantidade'] = boletoDados.quantidade
valor = self._formataValorParaExibir(boletoDados.valor)
tpl_data['valor'] = valor
valor_doc = self._formataValorParaExibir(boletoDados.valor_documento)
tpl_data['valor_documento'] = valor_doc
# Instructions
tpl_data['instrucoes'] = ''
for instrucao in boletoDados.instrucoes:
tpl_data['instrucoes'] += '<p>{0}</p>'.format(instrucao)
# Footer
tpl_data['sacado_info'] = ''
for linha_sacado in boletoDados.sacado:
tpl_data['sacado_info'] += '<p>{0}</p>'.format(linha_sacado)
# Barcode
tpl_data['barcode'] = self._codigoBarraI25(boletoDados.barcode)
self.html += tpl.substitute(tpl_data)
def drawCanhoto(self, html):
if html:
self.html += str(html)
def printPage(self):
self.html += '<script>window.print();</script>'
def drawBoletoCarneDuplo(self, boletoDados1, boletoDados2=None):
"""Imprime um boleto tipo carnê com 2 boletos por página.
:param boletoDados1: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:param boletoDados2: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:type boletoDados1: :class:`pyboleto.data.BoletoData`
:type boletoDados2: :class:`pyboleto.data.BoletoData`
"""
raise NotImplementedError('Em desenvolvimento')
def drawBoleto(self, boletoDados):
"""Imprime Boleto Convencional
Você pode chamar este método diversas vezes para criar um arquivo com
várias páginas, uma por boleto.
:param boletoDados: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:type boletoDados: :class:`pyboleto.data.BoletoData`
"""
self._drawReciboSacado(boletoDados)
self._drawHorizontalCorteLine()
self._drawReciboCaixa(boletoDados)
self._drawHorizontalCorteLine()
def nextPage(self):
"""Força início de nova página"""
self.html += '</div><div class="pagina">'
def save(self):
"""Fecha boleto e constroi o arquivo"""
self.html += '</div></body></html>'
if hasattr(self.fileDescr, 'write'):
self.fileDescr.write(self.html)
else:
with open(self.fileDescr, 'w') as fd:
fd.write(self.html)
def _formataValorParaExibir(self, nfloat):
if nfloat:
txt = nfloat
txt = txt.replace('.', ',')
else:
txt = ""
return txt
def _codigoBarraI25(self, code):
"""Imprime Código de barras otimizado para boletos
http://en.wikipedia.org/wiki/Interleaved_2_of_5
"""
digits = ['n', 'n s', 'n', 'n s']
if len(code) % 2 != 0:
code = '0' + code
for digt1, digt2 in self._grouper(2, code):
digt1_repr = DIGITS[int(digt1)]
digt2_repr = map(lambda x: x + ' s', DIGITS[int(digt2)])
digits.extend(chain(*zip(digt1_repr, digt2_repr)))
digits.extend(['w', 'n s', 'n'])
result = []
for digit in digits:
result.append('<span class="{0}"></span>'.format(digit))
return ''.join(result)
def _grouper(self, n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
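# Illustrative sketch (not part of the original module): how _codigoBarraI25
# interleaves one pair of digits in the Interleaved 2 of 5 scheme -- the first
# digit encodes the bar widths and the second digit the space widths (the
# ' s' suffix). The digits 4 and 7 are an arbitrary example.
_bars = DIGITS[4]
_spaces = [width + ' s' for width in DIGITS[7]]
_interleaved = list(chain(*zip(_bars, _spaces)))
assert _interleaved == ['n', 'n s', 'n', 'n s', 'w', 'n s', 'n', 'w s', 'w', 'w s']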
| 35.926829
| 77
| 0.617011
| 1,194
| 10,311
| 5.156616
| 0.237018
| 0.050024
| 0.027611
| 0.043853
| 0.410102
| 0.404743
| 0.376969
| 0.337502
| 0.335066
| 0.318824
| 0
| 0.009436
| 0.260014
| 10,311
| 286
| 78
| 36.052448
| 0.797379
| 0.212201
| 0
| 0.291667
| 0
| 0
| 0.118981
| 0.016137
| 0
| 0
| 0
| 0.003497
| 0
| 1
| 0.089286
| false
| 0
| 0.047619
| 0
| 0.172619
| 0.011905
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ba3e40007f306c4c070fefef8a9b0aa2387204
| 363
|
py
|
Python
|
src/fetchWords.py
|
theyadev/thierry-bot
|
f3c72998d4c16afbca77baf4cabaf0f547d51e94
|
[
"MIT"
] | null | null | null |
src/fetchWords.py
|
theyadev/thierry-bot
|
f3c72998d4c16afbca77baf4cabaf0f547d51e94
|
[
"MIT"
] | 2
|
2022-01-20T16:36:33.000Z
|
2022-03-31T14:16:01.000Z
|
src/fetchWords.py
|
theyadev/thierry-bot
|
f3c72998d4c16afbca77baf4cabaf0f547d51e94
|
[
"MIT"
] | 1
|
2022-01-28T12:14:14.000Z
|
2022-01-28T12:14:14.000Z
|
import requests
words_list = requests.get("https://raw.githubusercontent.com/atebits/Words/master/Words/fr.txt").text
words_list = filter(lambda x: len(x) > 4, words_list.split('\n'))
path = input("Chemin d'écriture ? (words.txt) ")
if path == "":
path = "./words.txt"
with open(path, "w", encoding="utf-8") as file:
file.write('\n'.join(words_list))
| 27.923077
| 101
| 0.672176
| 56
| 363
| 4.285714
| 0.660714
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006309
| 0.126722
| 363
| 13
| 102
| 27.923077
| 0.750789
| 0
| 0
| 0
| 0
| 0
| 0.32967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ba8bca5b7327bbb7e6554d0a3849c186cc4ba9
| 1,623
|
py
|
Python
|
inspiration/simplegallery/test/upload/variants/test_aws_uploader.py
|
Zenahr/simple-music-gallery
|
2cf6e81208b721a91dcbf77e047c7f77182dd194
|
[
"MIT"
] | 1
|
2020-07-03T17:21:01.000Z
|
2020-07-03T17:21:01.000Z
|
simplegallery/test/upload/variants/test_aws_uploader.py
|
theemack/simple-photo-gallery
|
f5db98bca7a7443ea7a9172317811f446eff760c
|
[
"MIT"
] | 1
|
2020-06-20T12:13:00.000Z
|
2020-06-20T15:32:03.000Z
|
inspiration/simplegallery/test/upload/variants/test_aws_uploader.py
|
Zenahr/simple-music-gallery
|
2cf6e81208b721a91dcbf77e047c7f77182dd194
|
[
"MIT"
] | null | null | null |
import unittest
from unittest import mock
import os
import subprocess
from testfixtures import TempDirectory
from simplegallery.upload.uploader_factory import get_uploader
class AWSUploaderTestCase(unittest.TestCase):
def test_no_location(self):
uploader = get_uploader('aws')
self.assertFalse(uploader.check_location(''))
@mock.patch('subprocess.run')
def test_upload_gallery(self, subprocess_run):
subprocess_run.return_value = subprocess.CompletedProcess([], returncode=0)
with TempDirectory() as tempdir:
# Setup mock file and uploader
tempdir.write('index.html', b'')
gallery_path = os.path.join(tempdir.path, 'index.html')
uploader = get_uploader('aws')
# Test upload to bucket
uploader.upload_gallery('s3://testbucket/path/', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
# Test upload to bucket without prefix
uploader.upload_gallery('testbucket/path/', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
# Test upload to bucket without trailing /
uploader.upload_gallery('s3://testbucket/path', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
if __name__ == '__main__':
unittest.main()
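# Illustrative sketch (not part of the original tests): the assertions above imply
# that the uploader normalises its target location by adding an "s3://" prefix and
# a trailing "/" when they are missing. normalize_location below is a hypothetical
# helper written only to make that behaviour explicit; it is not part of the
# simplegallery API.
def normalize_location(location):
    """Return the location as an s3:// URL ending with a slash."""
    if not location.startswith('s3://'):
        location = 's3://' + location
    if not location.endswith('/'):
        location += '/'
    return location
assert normalize_location('testbucket/path') == 's3://testbucket/path/'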
| 37.744186
| 103
| 0.646334
| 178
| 1,623
| 5.657303
| 0.337079
| 0.076465
| 0.079444
| 0.053625
| 0.405164
| 0.405164
| 0.405164
| 0.405164
| 0.405164
| 0.405164
| 0
| 0.007183
| 0.227973
| 1,623
| 42
| 104
| 38.642857
| 0.796488
| 0.078866
| 0
| 0.285714
| 0
| 0
| 0.167114
| 0.056376
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9be3b65d403b8ba23a315dd5e1dcfb9fd542171
| 2,553
|
py
|
Python
|
tests/syncdb_signals/tests.py
|
mdj2/django
|
e71b63e280559122371d125d75a593dc2435c394
|
[
"BSD-3-Clause"
] | 1
|
2017-02-08T15:13:43.000Z
|
2017-02-08T15:13:43.000Z
|
tests/syncdb_signals/tests.py
|
mdj2/django
|
e71b63e280559122371d125d75a593dc2435c394
|
[
"BSD-3-Clause"
] | null | null | null |
tests/syncdb_signals/tests.py
|
mdj2/django
|
e71b63e280559122371d125d75a593dc2435c394
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models import signals
from django.test import TestCase
from django.core import management
from django.utils import six
from shared_models import models
PRE_SYNCDB_ARGS = ['app', 'create_models', 'verbosity', 'interactive', 'db']
SYNCDB_DATABASE = 'default'
SYNCDB_VERBOSITY = 1
SYNCDB_INTERACTIVE = False
class PreSyncdbReceiver(object):
def __init__(self):
self.call_counter = 0
self.call_args = None
def __call__(self, signal, sender, **kwargs):
self.call_counter = self.call_counter + 1
self.call_args = kwargs
class OneTimeReceiver(object):
"""
Special receiver for handling the fact that the test runner calls syncdb for
several databases and several times for some of them.
"""
def __init__(self):
self.call_counter = 0
self.call_args = None
def __call__(self, signal, sender, **kwargs):
# Although test runner calls syncdb for several databases,
# testing for only one of them is quite sufficient.
if kwargs['db'] == SYNCDB_DATABASE:
self.call_counter = self.call_counter + 1
self.call_args = kwargs
# we need to test only one call of syncdb
signals.pre_syncdb.disconnect(pre_syncdb_receiver, sender=models)
# We connect receiver here and not in unit test code because we need to
# connect receiver before test runner creates database. That is, sequence of
# actions would be:
#
# 1. Test runner imports this module.
# 2. We connect receiver.
# 3. Test runner calls syncdb for create default database.
# 4. Test runner execute our unit test code.
pre_syncdb_receiver = OneTimeReceiver()
signals.pre_syncdb.connect(pre_syncdb_receiver, sender=models)
class SyncdbSignalTests(TestCase):
def test_pre_syncdb_call_time(self):
self.assertEqual(pre_syncdb_receiver.call_counter, 1)
def test_pre_syncdb_args(self):
r = PreSyncdbReceiver()
signals.pre_syncdb.connect(r, sender=models)
management.call_command('syncdb', database=SYNCDB_DATABASE,
verbosity=SYNCDB_VERBOSITY, interactive=SYNCDB_INTERACTIVE,
load_initial_data=False, stdout=six.StringIO())
args = r.call_args
self.assertEqual(r.call_counter, 1)
self.assertEqual(set(args), set(PRE_SYNCDB_ARGS))
self.assertEqual(args['app'], models)
self.assertEqual(args['verbosity'], SYNCDB_VERBOSITY)
self.assertEqual(args['interactive'], SYNCDB_INTERACTIVE)
self.assertEqual(args['db'], 'default')
| 34.04
| 77
| 0.703486
| 328
| 2,553
| 5.277439
| 0.29878
| 0.057192
| 0.051993
| 0.02773
| 0.233391
| 0.18602
| 0.18602
| 0.139804
| 0.139804
| 0.139804
| 0
| 0.005462
| 0.211124
| 2,553
| 74
| 78
| 34.5
| 0.854022
| 0.236584
| 0
| 0.27907
| 0
| 0
| 0.044294
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 1
| 0.139535
| false
| 0
| 0.116279
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9be5eda54c6b03914f01c88d3b8d97dd5add586
| 3,625
|
py
|
Python
|
pytorch_lightning/plugins/environments/slurm_environment.py
|
gianscarpe/pytorch-lightning
|
261ea90822e2bf1cfa5d56171ab1f95a81d5c571
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/environments/slurm_environment.py
|
gianscarpe/pytorch-lightning
|
261ea90822e2bf1cfa5d56171ab1f95a81d5c571
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/environments/slurm_environment.py
|
gianscarpe/pytorch-lightning
|
261ea90822e2bf1cfa5d56171ab1f95a81d5c571
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
log = logging.getLogger(__name__)
class SLURMEnvironment(ClusterEnvironment):
"""Cluster environment for training on a cluster managed by SLURM."""
@property
def creates_processes_externally(self) -> bool:
return True
@staticmethod
def detect() -> bool:
"""Returns ``True`` if the current process was launched on a SLURM cluster."""
return "SLURM_NTASKS" in os.environ
@property
def main_address(self) -> str:
# figure out the root node addr
slurm_nodelist = os.environ.get("SLURM_NODELIST")
if slurm_nodelist:
root_node = slurm_nodelist.split(" ")[0].split(",")[0]
else:
root_node = "127.0.0.1"
root_node = self.resolve_root_node_address(root_node)
os.environ["MASTER_ADDR"] = root_node
log.debug(f"MASTER_ADDR: {os.environ['MASTER_ADDR']}")
return root_node
@property
def main_port(self) -> int:
# -----------------------
# SLURM JOB = PORT number
# -----------------------
# this way every process knows what port to use
default_port = os.environ.get("SLURM_JOB_ID")
if default_port:
# use the last 4 numbers in the job id as the id
default_port = default_port[-4:]
# all ports should be in the 10k+ range
default_port = int(default_port) + 15000
else:
default_port = 12910
# -----------------------
# PORT NUMBER = MASTER_PORT
# -----------------------
# in case the user passed it in
if "MASTER_PORT" in os.environ:
default_port = os.environ["MASTER_PORT"]
else:
os.environ["MASTER_PORT"] = str(default_port)
log.debug(f"MASTER_PORT: {os.environ['MASTER_PORT']}")
return int(default_port)
def world_size(self) -> int:
return int(os.environ["SLURM_NTASKS"])
def set_world_size(self, size: int) -> None:
log.debug("SLURMEnvironment.set_world_size was called, but setting world size is not allowed. Ignored.")
def global_rank(self) -> int:
return int(os.environ["SLURM_PROCID"])
def set_global_rank(self, rank: int) -> None:
log.debug("SLURMEnvironment.set_global_rank was called, but setting global rank is not allowed. Ignored.")
def local_rank(self) -> int:
return int(os.environ["SLURM_LOCALID"])
def node_rank(self) -> int:
return int(os.environ["SLURM_NODEID"])
def resolve_root_node_address(self, root_node: str) -> str:
if "[" in root_node:
name, numbers = root_node.split("[", maxsplit=1)
number = numbers.split(",", maxsplit=1)[0]
if "-" in number:
number = number.split("-")[0]
number = re.sub("[^0-9]", "", number)
root_node = name + number
return root_node
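# Illustrative sketch (not part of the original module): what the address and port
# logic above produces for made-up SLURM values. _demo_resolve mirrors
# resolve_root_node_address() as a stand-alone function purely for illustration.
def _demo_resolve(root_node: str) -> str:
    if "[" in root_node:
        name, numbers = root_node.split("[", maxsplit=1)
        number = numbers.split(",", maxsplit=1)[0]
        if "-" in number:
            number = number.split("-")[0]
        number = re.sub("[^0-9]", "", number)
        root_node = name + number
    return root_node
assert _demo_resolve("node[12-15,20]") == "node12"
assert int("123456789"[-4:]) + 15000 == 21789  # port derived from the last 4 digits of the job id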
| 33.878505
| 114
| 0.622345
| 466
| 3,625
| 4.693133
| 0.345494
| 0.051212
| 0.034294
| 0.029264
| 0.132602
| 0.091449
| 0.060357
| 0.046639
| 0
| 0
| 0
| 0.011839
| 0.254345
| 3,625
| 106
| 115
| 34.198113
| 0.797262
| 0.285241
| 0
| 0.137931
| 0
| 0
| 0.163082
| 0.045757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.189655
| false
| 0
| 0.068966
| 0.086207
| 0.431034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9be639438e84e867c9e53c267b847b31292fe23
| 928
|
py
|
Python
|
examples/mouse.py
|
ginkage/trackball-python
|
06439ac77935f7fd9374bd4f535822e859734729
|
[
"MIT"
] | 22
|
2019-04-19T11:13:16.000Z
|
2022-03-04T15:04:43.000Z
|
examples/mouse.py
|
ginkage/trackball-python
|
06439ac77935f7fd9374bd4f535822e859734729
|
[
"MIT"
] | 7
|
2019-06-17T13:48:41.000Z
|
2022-02-07T14:24:00.000Z
|
examples/mouse.py
|
ginkage/trackball-python
|
06439ac77935f7fd9374bd4f535822e859734729
|
[
"MIT"
] | 6
|
2019-04-24T00:58:29.000Z
|
2022-01-26T15:39:10.000Z
|
#!/usr/bin/env python
import time
import os
import math
from trackball import TrackBall
print("""Trackball: Mouse
Use the trackball as a mouse in Raspbian, with right-click
when the switch is pressed.
Press Ctrl+C to exit!
""")
trackball = TrackBall(interrupt_pin=4)
trackball.set_rgbw(0, 0, 0, 0)
# Check for xte (used to control mouse)
use_xte = os.system('which xte') == 0
if not use_xte:
raise RuntimeError("xte not found. Did you sudo apt install xautomation?")
while True:
up, down, left, right, switch, state = trackball.read()
# Send movements and clicks to xte
if switch:
cmd = 'xte "mouseclick 1"'
os.system(cmd)
elif right or up or left or down:
x = right - left
x = math.copysign(x**2, x)
y = down - up
y = math.copysign(y**2, y)
cmd = 'xte "mousermove {} {}"'.format(int(x), int(y))
os.system(cmd)
time.sleep(0.0001)
| 23.2
| 78
| 0.635776
| 145
| 928
| 4.041379
| 0.544828
| 0.010239
| 0.010239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021368
| 0.243534
| 928
| 39
| 79
| 23.794872
| 0.81339
| 0.09806
| 0
| 0.074074
| 0
| 0
| 0.27458
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9be866c44b7b03225042353a7fcf648c1ce10ab
| 11,294
|
py
|
Python
|
garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
|
artberryx/LSD
|
99ee081de2502b4d13c140b474f772db8a5f92fe
|
[
"MIT"
] | 7
|
2022-02-01T03:02:24.000Z
|
2022-02-10T12:54:05.000Z
|
garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
|
artberryx/LSD
|
99ee081de2502b4d13c140b474f772db8a5f92fe
|
[
"MIT"
] | null | null | null |
garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
|
artberryx/LSD
|
99ee081de2502b4d13c140b474f772db8a5f92fe
|
[
"MIT"
] | 2
|
2022-02-03T03:33:25.000Z
|
2022-02-10T12:54:07.000Z
|
"""GaussianMLPRegressorModel."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models import GaussianMLPModel
class GaussianMLPRegressorModel(GaussianMLPModel):
"""GaussianMLPRegressor based on garage.tf.models.Model class.
This class can be used to perform regression by fitting a Gaussian
distribution to the outputs.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the std network.
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the std network.
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a tf.Tensor. Set
it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and applied a
exponential transformation
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_shape,
output_dim,
name='GaussianMLPRegressorModel',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_hidden_b_init=tf.zeros_initializer(),
std_output_nonlinearity=None,
std_output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_parameterization='exp',
layer_normalization=False):
super().__init__(output_dim=output_dim,
name=name,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_output_nonlinearity=std_output_nonlinearity,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization)
self._input_shape = input_shape
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'normalized_dist', 'normalized_mean', 'normalized_log_std', 'dist',
'mean', 'log_std', 'x_mean', 'x_std', 'y_mean', 'y_std'
]
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Place holder for state input.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tfp.distributions.MultivariateNormalDiag: Normalized distribution.
tf.Tensor: Normalized mean.
tf.Tensor: Normalized log_std.
tfp.distributions.MultivariateNormalDiag: Vanilla distribution.
tf.Tensor: Vanilla mean.
tf.Tensor: Vanilla log_std.
tf.Tensor: Mean for data.
tf.Tensor: log_std for data.
tf.Tensor: Mean for label.
tf.Tensor: log_std for label.
"""
with tf.compat.v1.variable_scope('normalized_vars'):
x_mean_var = tf.compat.v1.get_variable(
name='x_mean',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
x_std_var = tf.compat.v1.get_variable(
name='x_std_var',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
y_mean_var = tf.compat.v1.get_variable(
name='y_mean_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
y_std_var = tf.compat.v1.get_variable(
name='y_std_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
normalized_xs_var = (state_input - x_mean_var) / x_std_var
_, normalized_dist_mean, normalized_dist_log_std = super()._build(
normalized_xs_var)
# Since regressor expects [N, *dims], we need to squeeze the extra
# dimension
normalized_dist_log_std = tf.squeeze(normalized_dist_log_std, 1)
with tf.name_scope('mean_network'):
means_var = normalized_dist_mean * y_std_var + y_mean_var
with tf.name_scope('std_network'):
log_stds_var = normalized_dist_log_std + tf.math.log(y_std_var)
normalized_dist = tfp.distributions.MultivariateNormalDiag(
loc=normalized_dist_mean,
scale_diag=tf.exp(normalized_dist_log_std))
vanilla_dist = tfp.distributions.MultivariateNormalDiag(
loc=means_var, scale_diag=tf.exp(log_stds_var))
return (normalized_dist, normalized_dist_mean, normalized_dist_log_std,
vanilla_dist, means_var, log_stds_var, x_mean_var, x_std_var,
y_mean_var, y_std_var)
def clone(self, name):
"""Return a clone of the model.
It copies the configuration and parameters of the primitive.
Args:
name (str): Name of the newly created model. It has to be
different from source model if cloned under the same
computational graph.
Returns:
GaussianMLPRegressorModel: Newly cloned model.
"""
new_regressor = self.__class__(
name=name,
input_shape=self._input_shape,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
learn_std=self._learn_std,
adaptive_std=self._adaptive_std,
std_share_network=self._std_share_network,
init_std=self._init_std,
min_std=self._min_std,
max_std=self._max_std,
std_hidden_sizes=self._std_hidden_sizes,
std_hidden_nonlinearity=self._std_hidden_nonlinearity,
std_hidden_w_init=self._std_hidden_w_init,
std_hidden_b_init=self._std_hidden_b_init,
std_output_nonlinearity=self._std_output_nonlinearity,
std_output_w_init=self._std_output_w_init,
std_parameterization=self._std_parameterization,
layer_normalization=self._layer_normalization)
new_regressor.parameters = self.parameters
return new_regressor
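A minimal standalone sketch (not part of the garage source) of the two std parameterizations listed in the class docstring above: with 'exp' the network stores log(std) and an exponential recovers the std; with 'softplus' the std is computed as log(1 + exp(x)). NumPy stands in for the TensorFlow ops used by the real model.
import numpy as np
def std_from_param(x, parameterization='exp'):
    """Map an unconstrained network output x to a positive std."""
    if parameterization == 'exp':
        return np.exp(x)             # x is interpreted as log(std)
    if parameterization == 'softplus':
        return np.log1p(np.exp(x))   # log(1 + exp(x)) is always positive
    raise ValueError('unknown std parameterization: ' + parameterization)
print(std_from_param(0.0, 'exp'))       # 1.0
print(std_from_param(0.0, 'softplus'))  # ~0.6931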
| 44.81746
| 79
| 0.611741
| 1,340
| 11,294
| 4.86791
| 0.158209
| 0.01533
| 0.016863
| 0.016097
| 0.516787
| 0.37866
| 0.322858
| 0.311207
| 0.288518
| 0.237314
| 0
| 0.005616
| 0.322029
| 11,294
| 251
| 80
| 44.996016
| 0.846284
| 0.387108
| 0
| 0.167939
| 0
| 0
| 0.028767
| 0.003887
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030534
| false
| 0
| 0.038168
| 0
| 0.099237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9becf802ca0765623e481aef0b8fd051c0096e5
| 3,594
|
py
|
Python
|
test.py
|
kim-sunghoon/DiracDeltaNet
|
7bcc0575f28715d9c7f737f8a239718320f9c05b
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
kim-sunghoon/DiracDeltaNet
|
7bcc0575f28715d9c7f737f8a239718320f9c05b
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
kim-sunghoon/DiracDeltaNet
|
7bcc0575f28715d9c7f737f8a239718320f9c05b
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import os
import argparse
from torch.autograd import Variable
from extensions.utils import progress_bar
from extensions.model_refinery_wrapper import ModelRefineryWrapper
from extensions.refinery_loss import RefineryLoss
from models import ShuffleNetv2_wrapper
from models import DiracDeltaNet_wrapper
parser = argparse.ArgumentParser(description='PyTorch imagenet inference')
parser.add_argument('--datadir', help='path to dataset')
parser.add_argument('--inputdir', help='path to input model')
args = parser.parse_args()
# Data
print('==> Preparing data..')
# Data loading code
valdir = os.path.join(args.datadir, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
#imagenet
testset = datasets.ImageFolder(valdir, transform_test)
num_classes=1000
testloader = torch.utils.data.DataLoader(testset, batch_size=1000, shuffle=False, pin_memory=True, num_workers=30)
use_cuda = torch.cuda.is_available()
print('Using input path: %s' % args.inputdir)
checkpoint = torch.load(args.inputdir)
init_net = checkpoint['net']
net=init_net.to('cpu')
label_refinery=torch.load('./resnet50.t7')
net = ModelRefineryWrapper(net, label_refinery)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
net = nn.DataParallel(net)
net=net.to(device)
criterion = RefineryLoss()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k)
return res
def test():
net.eval()
criterion.eval()
test_loss = 0
correct_1 = 0
correct_5 = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(device), targets.cuda(device)
with torch.no_grad():
outputs = net(inputs)
loss = criterion(outputs, targets)
if isinstance(loss, tuple):
loss_value, outputs = loss
else:
loss_value = loss
test_loss += loss_value.item()
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
total += targets.size(0)
correct_1 += prec1
correct_5 += prec5
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*float(correct_1)/float(total), correct_1, total))
return 100.*float(correct_1)/float(total),100.*float(correct_5)/float(total),test_loss
acc1,acc5,loss=test()
print('top-1 accuracy: {0:.3f}%, top-5 accuracy: {1:.3f}%'.format(acc1,acc5))
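A tiny self-contained check (not part of the original script) of the top-k mechanism used by accuracy() above, on a toy batch where the last sample is misclassified at top-1.
import torch
logits = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
targets = torch.tensor([1, 0, 0])              # last sample should be class 0
_, pred = logits.topk(1, 1, True, True)        # same call the script makes
pred = pred.t()
correct = pred.eq(targets.view(1, -1).expand_as(pred))
print(correct[:1].reshape(-1).float().sum().item())  # 2.0 -> two of three correct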
| 28.983871
| 115
| 0.645242
| 461
| 3,594
| 4.91974
| 0.357918
| 0.02425
| 0.019841
| 0.017637
| 0.022928
| 0.022928
| 0
| 0
| 0
| 0
| 0
| 0.034021
| 0.231219
| 3,594
| 123
| 116
| 29.219512
| 0.786826
| 0.023929
| 0
| 0.023256
| 0
| 0
| 0.072846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.186047
| 0
| 0.232558
| 0.05814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c028ee1a5ced657b4755383e247cbb2fed35a8
| 416
|
py
|
Python
|
paccmann_chemistry/utils/hyperparams.py
|
PaccMann/paccmann_chemistry
|
f7e9735aafb936f837c38b5055c654be178f385f
|
[
"MIT"
] | 9
|
2019-11-06T10:39:15.000Z
|
2022-01-09T11:08:52.000Z
|
paccmann_chemistry/utils/hyperparams.py
|
PaccMann/paccmann_chemistry
|
f7e9735aafb936f837c38b5055c654be178f385f
|
[
"MIT"
] | 10
|
2019-11-06T17:33:51.000Z
|
2020-12-28T07:46:23.000Z
|
paccmann_chemistry/utils/hyperparams.py
|
PaccMann/paccmann_chemistry
|
f7e9735aafb936f837c38b5055c654be178f385f
|
[
"MIT"
] | 5
|
2020-08-13T15:00:57.000Z
|
2022-03-24T14:29:07.000Z
|
"""Model Parameters Module."""
import torch.optim as optim
from .search import SamplingSearch, GreedySearch, BeamSearch
SEARCH_FACTORY = {
'sampling': SamplingSearch,
'greedy': GreedySearch,
'beam': BeamSearch,
}
OPTIMIZER_FACTORY = {
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamax': optim.Adamax,
'rmsprop': optim.RMSprop,
'sgd': optim.SGD
}
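A brief usage sketch (not from the PaccMann repository) of how these factory maps are typically consumed: a string key taken from a params file selects the class, which is then instantiated. The params dict and the tiny model are invented for illustration; OPTIMIZER_FACTORY refers to the map defined above.
import torch
params = {'optimizer': 'adam', 'lr': 1e-3}
model = torch.nn.Linear(8, 1)
optimizer_cls = OPTIMIZER_FACTORY[params.get('optimizer', 'adam')]
optimizer = optimizer_cls(model.parameters(), lr=params.get('lr', 1e-3))
print(type(optimizer).__name__)  # Adam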
| 21.894737
| 60
| 0.675481
| 42
| 416
| 6.642857
| 0.547619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185096
| 416
| 18
| 61
| 23.111111
| 0.823009
| 0.057692
| 0
| 0
| 0
| 0
| 0.137306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c1c1059c5b91f27882844cb4c3becda27ebd7c
| 6,417
|
py
|
Python
|
tests/gpflux/layers/test_latent_variable_layer.py
|
francescodonato/GPflux
|
fe45b353243b31d9fa0ec0daeb1d39a2e78ba094
|
[
"Apache-2.0"
] | 100
|
2021-04-13T07:54:49.000Z
|
2022-03-21T16:25:45.000Z
|
tests/gpflux/layers/test_latent_variable_layer.py
|
francescodonato/GPflux
|
fe45b353243b31d9fa0ec0daeb1d39a2e78ba094
|
[
"Apache-2.0"
] | 17
|
2021-04-13T03:13:11.000Z
|
2022-02-28T07:36:55.000Z
|
tests/gpflux/layers/test_latent_variable_layer.py
|
francescodonato/GPflux
|
fe45b353243b31d9fa0ec0daeb1d39a2e78ba094
|
[
"Apache-2.0"
] | 13
|
2021-04-12T19:12:17.000Z
|
2022-03-10T00:41:44.000Z
|
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.kullback_leiblers import gauss_kl
from gpflux.encoders import DirectlyParameterizedNormalDiag
from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer
tf.keras.backend.set_floatx("float64")
############
# Utilities
############
def _zero_one_normal_prior(w_dim):
""" N(0, I) prior """
return tfp.distributions.MultivariateNormalDiag(loc=np.zeros(w_dim), scale_diag=np.ones(w_dim))
def get_distributions_with_w_dim():
distributions = []
for d in [1, 5]:
mean = np.zeros(d)
scale_tri_l = np.eye(d)
mvn = tfp.distributions.MultivariateNormalTriL(mean, scale_tri_l)
std = np.ones(d)
mvn_diag = tfp.distributions.MultivariateNormalDiag(mean, std)
distributions.append((mvn, d))
distributions.append((mvn_diag, d))
return distributions
############
# Tests
############
@pytest.mark.parametrize("distribution, w_dim", get_distributions_with_w_dim())
def test_local_kls(distribution, w_dim):
lv = LatentVariableLayer(encoder=None, prior=distribution)
# test kl is 0 when posteriors == priors
posterior = distribution
assert lv._local_kls(posterior) == 0
# test kl > 0 when posteriors != priors
batch_size = 10
params = distribution.parameters
posterior_params = {
k: [v + 0.5 for _ in range(batch_size)]
for k, v in params.items()
if isinstance(v, np.ndarray)
}
posterior = lv.distribution_class(**posterior_params)
local_kls = lv._local_kls(posterior)
assert np.all(local_kls > 0)
assert local_kls.shape == (batch_size,)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_local_kl_gpflow_consistency(w_dim):
num_data = 400
means = np.random.randn(num_data, w_dim)
encoder = DirectlyParameterizedNormalDiag(num_data, w_dim, means)
lv = LatentVariableLayer(encoder=encoder, prior=_zero_one_normal_prior(w_dim))
posteriors = lv._inference_posteriors(
[np.random.randn(num_data, 3), np.random.randn(num_data, 2)]
)
q_mu = posteriors.parameters["loc"]
q_sqrt = posteriors.parameters["scale_diag"]
gpflow_local_kls = gauss_kl(q_mu, q_sqrt)
tfp_local_kls = tf.reduce_sum(lv._local_kls(posteriors))
np.testing.assert_allclose(tfp_local_kls, gpflow_local_kls, rtol=1e-10)
class ArrayMatcher:
def __init__(self, expected):
self.expected = expected
def __eq__(self, actual):
return np.allclose(actual, self.expected, equal_nan=True)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_latent_variable_layer_losses(mocker, w_dim):
num_data, x_dim, y_dim = 43, 3, 1
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(encoder=encoder, prior=prior)
inputs = np.full((num_data, x_dim), np.nan)
targets = np.full((num_data, y_dim), np.nan)
observations = [inputs, targets]
encoder_inputs = np.concatenate(observations, axis=-1)
_ = lv(inputs)
encoder.assert_not_called()
assert lv.losses == [0.0]
_ = lv(inputs, observations=observations, training=True)
# assert_called_once_with uses == for comparison which fails on arrays
encoder.assert_called_once_with(ArrayMatcher(encoder_inputs), training=True)
expected_loss = [tf.reduce_mean(posteriors.kl_divergence(prior))]
np.testing.assert_equal(lv.losses, expected_loss) # also checks shapes match
@pytest.mark.parametrize("w_dim", [1, 5])
@pytest.mark.parametrize("seed2", [None, 42])
def test_latent_variable_layer_samples(mocker, test_data, w_dim, seed2):
seed = 123
inputs, targets = test_data
num_data, x_dim = inputs.shape
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(prior=prior, encoder=encoder)
tf.random.set_seed(seed)
sample_prior = lv(inputs, seed=seed2)
tf.random.set_seed(seed)
prior_expected = np.concatenate([inputs, prior.sample(num_data, seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_prior, prior_expected)
tf.random.set_seed(seed)
sample_posterior = lv(inputs, observations=[inputs, targets], training=True, seed=seed2)
tf.random.set_seed(seed)
posterior_expected = np.concatenate([inputs, posteriors.sample(seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_posterior, posterior_expected)
def test_no_tensorflow_metaclass_overwritten():
"""
LayerWithObservations is a subclass of tf.keras.layers.Layer (via TrackableLayer);
this test ensures that TrackableLayer does not have a metaclass, and hence by adding
the ABCMeta to LayerWithObservations we are not accidentally removing some required
TensorFlow magic metaclass.
"""
assert LayerWithObservations.__bases__ == (TrackableLayer,)
assert type(TrackableLayer) is type
assert type(LayerWithObservations) is abc.ABCMeta
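The ArrayMatcher helper above exists because Mock.assert_called_once_with compares arguments with ==, which is ambiguous for numpy arrays. A minimal standalone illustration (not part of the test suite), using unittest.mock directly rather than the pytest-mock fixture:
import numpy as np
from unittest import mock
class ArrayMatcher:
    def __init__(self, expected):
        self.expected = expected
    def __eq__(self, actual):
        return np.allclose(actual, self.expected, equal_nan=True)
fn = mock.Mock()
fn(np.array([1.0, 2.0]))
# fn.assert_called_once_with(np.array([1.0, 2.0]))  # typically fails: ambiguous truth value
fn.assert_called_once_with(ArrayMatcher(np.array([1.0, 2.0])))  # passes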
| 32.573604
| 99
| 0.717781
| 848
| 6,417
| 5.21934
| 0.275943
| 0.018075
| 0.032309
| 0.046317
| 0.286941
| 0.229327
| 0.208089
| 0.189336
| 0.189336
| 0.173972
| 0
| 0.011094
| 0.171264
| 6,417
| 196
| 100
| 32.739796
| 0.821173
| 0.163316
| 0
| 0.217391
| 0
| 0
| 0.011202
| 0
| 0
| 0
| 0
| 0
| 0.113043
| 1
| 0.078261
| false
| 0
| 0.069565
| 0.008696
| 0.182609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c21d6803d82661080e36eb0e94a3b82f8b2f7c
| 18,041
|
py
|
Python
|
aw-actor-trust.py
|
actingweb/box-actingweb
|
f586458484649aba927cd78c60b4d0fec7b82ca6
|
[
"Apache-2.0"
] | null | null | null |
aw-actor-trust.py
|
actingweb/box-actingweb
|
f586458484649aba927cd78c60b4d0fec7b82ca6
|
[
"Apache-2.0"
] | null | null | null |
aw-actor-trust.py
|
actingweb/box-actingweb
|
f586458484649aba927cd78c60b4d0fec7b82ca6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
from actingweb import actor
from actingweb import config
from actingweb import trust
from actingweb import auth
import webapp2
import os
from google.appengine.ext.webapp import template
import json
import logging
import datetime
import time
# /trust handlers
#
# GET /trust with query parameters (relationship, type, and peerid) to retrieve trust relationships (auth: only creator and admins allowed)
# POST /trust with json body to initiate a trust relationship between this
# actor and another (reciprocal relationship) (auth: only creator and admins allowed)
# POST /trust/{relationship} with json body to create new trust
# relationship (see config.py for default relationship and auto-accept, no
# auth required)
# GET /trust/{relationship}/{actorid} to get details on a specific relationship (auth: creator, admin, or peer secret)
# POST /trust/{relationship}/{actorid} to send information to a peer about changes in the relationship
# PUT /trust/{relationship}/{actorid} with a json body to change details on a relationship (baseuri, secret, desc) (auth: creator,
# admin, or peer secret)
# DELETE /trust/{relationship}/{actorid} to delete a relationship (with
# ?peer=true if the delete is from the peer) (auth: creator, admin, or
# peer secret)
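# A hypothetical client-side sketch (not part of this handler module) of the
# flow described above, using the `requests` library; host, actor ids and the
# relationship name are invented for illustration:
#
#   import requests
#   base = 'https://aw.example.com/actor123'
#   # initiate a reciprocal trust relationship from this actor to a peer
#   requests.post(base + '/trust',
#                 json={'url': 'https://peer.example.com/actor456',
#                       'relationship': 'friend', 'desc': 'test peer'})
#   # later, fetch details on that specific relationship
#   requests.get(base + '/trust/friend/actor456')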
# Handling requests to trust/
class rootHandler(webapp2.RequestHandler):
def get(self, id):
if self.request.get('_method') == 'POST':
self.post(id)
return
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust')
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='trust', method='GET'):
self.response.set_status(403)
return
relationship = ''
type = ''
peerid = ''
relationship = self.request.get('relationship')
type = self.request.get('type')
peerid = self.request.get('peerid')
relationships = myself.getTrustRelationships(
relationship=relationship, peerid=peerid, type=type)
if not relationships:
self.response.set_status(404, 'Not found')
return
pairs = []
for rel in relationships:
pairs.append({
'baseuri': rel.baseuri,
'id': myself.id,
'peerid': rel.peerid,
'relationship': rel.relationship,
'approved': rel.approved,
'peer_approved': rel.peer_approved,
'verified': rel.verified,
'type': rel.type,
'desc': rel.desc,
'secret': rel.secret,
})
out = json.dumps(pairs)
self.response.write(out)
self.response.headers["Content-Type"] = "application/json"
self.response.set_status(200, 'Ok')
def post(self, id):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust')
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='trust', method='POST'):
self.response.set_status(403)
return
secret = ''
desc = ''
relationship = Config.default_relationship
type = ''
try:
params = json.loads(self.request.body.decode('utf-8', 'ignore'))
if 'url' in params:
url = params['url']
else:
url = ''
if 'relationship' in params:
relationship = params['relationship']
if 'type' in params:
type = params['type']
if 'desc' in params:
desc = params['desc']
except ValueError:
url = self.request.get('url')
relationship = self.request.get('relationship')
type = self.request.get('type')
if len(url) == 0:
self.response.set_status(400, 'Missing peer URL')
return
secret = Config.newToken()
new_trust = myself.createReciprocalTrust(
url=url, secret=secret, desc=desc, relationship=relationship, type=type)
if not new_trust:
self.response.set_status(408, 'Unable to create trust relationship')
return
self.response.headers.add_header(
"Location", str(Config.root + myself.id + '/trust/' + new_trust.relationship + '/' + new_trust.peerid))
pair = {
'baseuri': new_trust.baseuri,
'id': myself.id,
'peerid': new_trust.peerid,
'relationship': new_trust.relationship,
'approved': new_trust.approved,
'peer_approved': new_trust.peer_approved,
'verified': new_trust.verified,
'type': new_trust.type,
'desc': new_trust.desc,
'secret': new_trust.secret,
}
out = json.dumps(pair)
self.response.write(out)
self.response.headers["Content-Type"] = "application/json"
self.response.set_status(201, 'Created')
# Handling requests to /trust/*, e.g. /trust/friend
class relationshipHandler(webapp2.RequestHandler):
def get(self, id, relationship):
if self.request.get('_method') == 'POST':
self.post(id, relationship)
return
self.response.set_status(404, "Not found")
def put(self, id, relationship):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust', subpath=relationship, add_response=False)
if not myself:
return
if relationship != 'trustee':
self.response.set_status(404, "Not found")
return
# Access is the same as /trust
if not check.checkAuthorisation(path='trust', method='POST'):
self.response.set_status(403)
return
try:
params = json.loads(self.request.body.decode('utf-8', 'ignore'))
if 'trustee_root' in params:
trustee_root = params['trustee_root']
else:
trustee_root = ''
if 'creator' in params:
creator = params['creator']
else:
creator = None
except ValueError:
self.response.set_status(400, 'No json content')
return
if len(trustee_root) > 0:
myself.setProperty('trustee_root', trustee_root)
if creator:
myself.modify(creator=creator)
self.response.set_status(204, 'No content')
def delete(self, id, relationship):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust',
subpath=relationship,
add_response=False)
if not myself:
return
if relationship != 'trustee':
self.response.set_status(404, "Not found")
return
# Access is the same as /trust
if not check.checkAuthorisation(path='trust', method='DELETE'):
self.response.set_status(403)
return
myself.deleteProperty('trustee_root')
self.response.set_status(204, 'No content')
def post(self, id, relationship):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust',
subpath=relationship,
add_response=False)
if not myself:
return
if not check.checkAuthorisation(path='trust', subpath='<type>', method='POST'):
self.response.set_status(403)
return
try:
params = json.loads(self.request.body.decode('utf-8', 'ignore'))
if 'baseuri' in params:
baseuri = params['baseuri']
else:
baseuri = ''
if 'id' in params:
peerid = params['id']
else:
peerid = ''
if 'type' in params:
type = params['type']
else:
type = ''
if 'secret' in params:
secret = params['secret']
else:
secret = ''
if 'desc' in params:
desc = params['desc']
else:
desc = ''
if 'verify' in params:
verificationToken = params['verify']
else:
verificationToken = None
except ValueError:
self.response.set_status(400, 'No json content')
return
if len(baseuri) == 0 or len(peerid) == 0 or len(type) == 0:
self.response.set_status(400, 'Missing mandatory attributes')
return
if Config.auto_accept_default_relationship and Config.default_relationship == relationship:
approved = True
else:
approved = False
# Since we received a request for a relationship, assume that peer has approved
new_trust = myself.createVerifiedTrust(baseuri=baseuri, peerid=peerid, approved=approved, secret=secret,
verificationToken=verificationToken, type=type, peer_approved=True, relationship=relationship, desc=desc)
if not new_trust:
self.response.set_status(403, 'Forbidden')
return
self.response.headers.add_header(
"Location", str(Config.root + myself.id + '/trust/' + new_trust.relationship + "/" + new_trust.peerid))
pair = {
'baseuri': new_trust.baseuri,
'id': myself.id,
'peerid': new_trust.peerid,
'relationship': new_trust.relationship,
'approved': new_trust.approved,
'peer_approved': new_trust.peer_approved,
'verified': new_trust.verified,
'type': new_trust.type,
'desc': new_trust.desc,
'secret': new_trust.secret,
}
out = json.dumps(pair)
self.response.write(out)
self.response.headers["Content-Type"] = "application/json"
if approved:
self.response.set_status(201, 'Created')
else:
self.response.set_status(202, 'Accepted')
# Handling requests to specific relationships, e.g. /trust/friend/12f2ae53bd
class trustHandler(webapp2.RequestHandler):
def get(self, id, relationship, peerid):
if self.request.get('_method') == 'PUT':
self.put(id, relationship, peerid)
return
if self.request.get('_method') == 'DELETE':
self.delete(id, relationship, peerid)
return
logging.debug('GET trust headers: ' + str(self.request.headers))
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust', subpath=relationship)
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='trust', subpath='<type>/<id>', method='GET', peerid=peerid):
self.response.set_status(403)
return
relationships = myself.getTrustRelationships(
relationship=relationship, peerid=peerid)
if not relationships:
self.response.set_status(404, 'Not found')
return
my_trust = relationships[0]
# If the peer did a GET to verify
if check.trust and check.trust.peerid == peerid and not my_trust.verified:
my_trust.modify(verified=True)
verificationToken = my_trust.verificationToken
else:
verificationToken = ''
pair = {
'baseuri': my_trust.baseuri,
'id': myself.id,
'peerid': my_trust.peerid,
'relationship': my_trust.relationship,
'approved': my_trust.approved,
'peer_approved': my_trust.peer_approved,
'verified': my_trust.verified,
'verificationToken': verificationToken,
'type': my_trust.type,
'desc': my_trust.desc,
'secret': my_trust.secret,
}
out = json.dumps(pair)
self.response.write(out)
self.response.headers["Content-Type"] = "application/json"
if my_trust.approved:
self.response.set_status(200, 'Ok')
else:
self.response.set_status(202, 'Accepted')
def post(self, id, relationship, peerid):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust', subpath=relationship)
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='trust', subpath='<type>/<id>', method='POST', peerid=peerid):
self.response.set_status(403)
return
try:
params = json.loads(self.request.body.decode('utf-8', 'ignore'))
peer_approved = None
if 'approved' in params:
if params['approved'] and params['approved'] == True:
peer_approved = True
except ValueError:
self.response.set_status(400, 'No json content')
return
if myself.modifyTrustAndNotify(relationship=relationship, peerid=peerid, peer_approved=peer_approved):
self.response.set_status(204, 'Ok')
else:
self.response.set_status(405, 'Not modified')
def put(self, id, relationship, peerid):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust', subpath=relationship)
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='trust', subpath='<type>/<id>', method='PUT', peerid=peerid):
self.response.set_status(403)
return
try:
params = json.loads(self.request.body.decode('utf-8', 'ignore'))
if 'baseuri' in params:
baseuri = params['baseuri']
else:
baseuri = ''
if 'desc' in params:
desc = params['desc']
else:
desc = ''
if 'approved' in params:
if params['approved'] == True or params['approved'].lower() == "true":
approved = True
else:
approved = None
except ValueError:
if not self.request.get('_method') or self.request.get('_method') != "PUT":
self.response.set_status(400, 'No json content')
return
if self.request.get('approved') and len(self.request.get('approved')) > 0:
if self.request.get('approved').lower() == "true":
approved = True
else:
approved = None
if self.request.get('baseuri') and len(self.request.get('baseuri')) > 0:
baseuri = self.request.get('baseuri')
else:
baseuri = ''
if self.request.get('desc') and len(self.request.get('desc')) > 0:
desc = self.request.get('desc')
else:
desc = ''
if myself.modifyTrustAndNotify(relationship=relationship, peerid=peerid, baseuri=baseuri, approved=approved, desc=desc):
self.response.set_status(204, 'Ok')
else:
self.response.set_status(405, 'Not modified')
def delete(self, id, relationship, peerid):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='trust', subpath=relationship, add_response=False)
if not myself or (check.response["code"] != 200 and check.response["code"] != 401):
auth.add_auth_response(appreq=self, auth_obj=check)
return
# We allow non-approved peers to delete even if we haven't approved the relationship yet
if not check.checkAuthorisation(path='trust', subpath='<type>/<id>', method='DELETE', peerid=peerid, approved=False):
self.response.set_status(403)
return
isPeer = False
if check.trust and check.trust.peerid == peerid:
isPeer = True
else:
# Use of GET param peer=true is a way of forcing no deletion of a peer
# relationship even when requestor is not a peer (primarily for testing purposes)
peerGet = self.request.get('peer').lower()
if peerGet.lower() == "true":
isPeer = True
Config = config.config()
relationships = myself.getTrustRelationships(
relationship=relationship, peerid=peerid)
if not relationships:
self.response.set_status(404, 'Not found')
return
my_trust = relationships[0]
if isPeer:
deleted = myself.deleteReciprocalTrust(peerid=peerid, deletePeer=False)
else:
deleted = myself.deleteReciprocalTrust(peerid=peerid, deletePeer=True)
if not deleted:
self.response.set_status(502, 'Not able to delete relationship with peer.')
return
self.response.set_status(204, 'Ok')
application = webapp2.WSGIApplication([
webapp2.Route(r'/<id>/trust<:/?>', rootHandler, name='rootHandler'),
webapp2.Route(r'/<id>/trust/<relationship><:/?>',
relationshipHandler, name='relationshipHandler'),
webapp2.Route(r'/<id>/trust/<relationship>/<peerid><:/?>', trustHandler, name='trustHandler'),
], debug=True)
| 41.955814
| 152
| 0.558228
| 1,868
| 18,041
| 5.322805
| 0.114026
| 0.056723
| 0.055818
| 0.078145
| 0.615307
| 0.590969
| 0.534748
| 0.473398
| 0.428342
| 0.416474
| 0
| 0.013286
| 0.332465
| 18,041
| 429
| 153
| 42.053613
| 0.812339
| 0.087357
| 0
| 0.616402
| 0
| 0
| 0.097518
| 0.004319
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026455
| false
| 0
| 0.029101
| 0
| 0.15873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c32f78fb7ce24035473595e0a40c4945453a5b
| 2,465
|
py
|
Python
|
classy_vision/heads/fully_connected_head.py
|
dlegor/ClassyVision
|
9c82d533b66b0a5fbb11f8ab3567a9c70aa4e013
|
[
"MIT"
] | 1
|
2021-04-11T19:01:10.000Z
|
2021-04-11T19:01:10.000Z
|
classy_vision/heads/fully_connected_head.py
|
prigoyal/ClassyVision
|
db87bb87068ee8d2c7b21849ddd0548082e20a87
|
[
"MIT"
] | null | null | null |
classy_vision/heads/fully_connected_head.py
|
prigoyal/ClassyVision
|
db87bb87068ee8d2c7b21849ddd0548082e20a87
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch.nn as nn
from classy_vision.generic.util import is_pos_int
from classy_vision.heads import ClassyHead, register_head
@register_head("fully_connected")
class FullyConnectedHead(ClassyHead):
"""This head defines a 2d average pooling layer
(:class:`torch.nn.AdaptiveAvgPool2d`) followed by a fully connected
layer (:class:`torch.nn.Linear`).
"""
def __init__(
self,
unique_id: str,
num_classes: int,
in_plane: int,
zero_init_bias: bool = False,
):
"""Constructor for FullyConnectedHead
Args:
unique_id: A unique identifier for the head. Multiple instances of
the same head might be attached to a model, and unique_id is used
to refer to them.
num_classes: Number of classes for the head. If None, then the fully
connected layer is not applied.
in_plane: Input size for the fully connected layer.
"""
super().__init__(unique_id, num_classes)
assert num_classes is None or is_pos_int(num_classes)
assert is_pos_int(in_plane)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = None if num_classes is None else nn.Linear(in_plane, num_classes)
if zero_init_bias:
self.fc.bias.data.zero_()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "FullyConnectedHead":
"""Instantiates a FullyConnectedHead from a configuration.
Args:
config: A configuration for a FullyConnectedHead.
See :func:`__init__` for parameters expected in the config.
Returns:
A FullyConnectedHead instance.
"""
num_classes = config.get("num_classes", None)
in_plane = config["in_plane"]
return cls(
config["unique_id"],
num_classes,
in_plane,
zero_init_bias=config.get("zero_init_bias", False),
)
def forward(self, x):
# perform average pooling:
out = self.avgpool(x)
# final classifier:
out = out.reshape(out.size(0), -1)
if self.fc is not None:
out = self.fc(out)
return out
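A short usage sketch (not part of the Classy Vision sources) of the config keys that from_config above expects; the sizes and identifier are arbitrary.
from classy_vision.heads.fully_connected_head import FullyConnectedHead
config = {
    "unique_id": "default_head",
    "num_classes": 10,
    "in_plane": 512,           # channel count of the trunk's final feature map
    "zero_init_bias": True,
}
head = FullyConnectedHead.from_config(config)
# head now applies AdaptiveAvgPool2d((1, 1)) followed by Linear(512, 10).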
| 31.602564
| 83
| 0.628398
| 317
| 2,465
| 4.712934
| 0.391167
| 0.066934
| 0.032129
| 0.022758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004577
| 0.290872
| 2,465
| 77
| 84
| 32.012987
| 0.850114
| 0.403245
| 0
| 0
| 0
| 0
| 0.056732
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.083333
| false
| 0
| 0.111111
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c3ac1232aa677a1999a869a726247c9e688214
| 3,400
|
py
|
Python
|
custom_components/wyzeapi/binary_sensor.py
|
np-hacs/ha-wyzeapi
|
8abc6af59d36514008f696310b290a046d7c7a72
|
[
"Apache-2.0"
] | null | null | null |
custom_components/wyzeapi/binary_sensor.py
|
np-hacs/ha-wyzeapi
|
8abc6af59d36514008f696310b290a046d7c7a72
|
[
"Apache-2.0"
] | null | null | null |
custom_components/wyzeapi/binary_sensor.py
|
np-hacs/ha-wyzeapi
|
8abc6af59d36514008f696310b290a046d7c7a72
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
from datetime import timedelta
from typing import List
from homeassistant.components.binary_sensor import (
BinarySensorEntity,
DEVICE_CLASS_MOTION
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from wyzeapy.base_client import Device, AccessTokenError
from wyzeapy.client import Client
from wyzeapy.types import PropertyIDs
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Wyze"
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities):
_LOGGER.debug("""Creating new WyzeApi binary sensor component""")
client: Client = hass.data[DOMAIN][config_entry.entry_id]
def get_cameras() -> List[Device]:
try:
return client.get_cameras()
except AccessTokenError as e:
_LOGGER.warning(e)
client.reauthenticate()
return client.get_cameras()
cameras = [WyzeCameraMotion(client, camera) for camera in await hass.async_add_executor_job(get_cameras)]
async_add_entities(cameras, True)
class WyzeCameraMotion(BinarySensorEntity):
_on: bool
_available: bool
def __init__(self, wyzeapi_client: Client, device: Device):
self._client = wyzeapi_client
self._device = device
self._last_event = int(str(int(time.time())) + "000")
@property
def device_info(self):
return {
"identifiers": {
(DOMAIN, self._device.mac)
},
"name": self.name,
"manufacturer": "WyzeLabs",
"model": self._device.product_model
}
@property
def available(self) -> bool:
return self._available
@property
def name(self):
"""Return the display name of this switch."""
return self._device.nickname
@property
def is_on(self):
"""Return true if switch is on."""
return self._on
@property
def unique_id(self):
return "{}-motion".format(self._device.mac)
@property
def device_state_attributes(self):
"""Return device attributes of the entity."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"state": self.is_on,
"available": self.available,
"device model": self._device.product_model,
"mac": self.unique_id
}
@property
def device_class(self):
return DEVICE_CLASS_MOTION
def update(self):
try:
device_info = self._client.get_info(self._device)
except AccessTokenError:
self._client.reauthenticate()
device_info = self._client.get_info(self._device)
for property_id, value in device_info:
if property_id == PropertyIDs.AVAILABLE:
self._available = True if value == "1" else False
latest_event = self._client.get_latest_event(self._device)
if latest_event is not None:
if latest_event.event_ts > self._last_event:
self._on = True
self._last_event = latest_event.event_ts
else:
self._on = False
self._last_event = latest_event.event_ts
else:
self._on = False
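A side note on the _last_event initialisation in __init__ above: appending "000" to the integer epoch seconds produces a millisecond-scale timestamp, which is what the event_ts comparison in update() appears to expect. A standalone illustration of the equivalence (the millisecond interpretation is an assumption, not stated in the source):
import time
now_s = int(time.time())
via_string = int(str(now_s) + "000")   # the form used in __init__
via_math = now_s * 1000                # plain arithmetic equivalent
assert via_string == via_math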
| 29.059829
| 109
| 0.645
| 382
| 3,400
| 5.489529
| 0.290576
| 0.042918
| 0.024797
| 0.025751
| 0.104912
| 0.079161
| 0.079161
| 0.079161
| 0.043872
| 0.043872
| 0
| 0.002419
| 0.270588
| 3,400
| 116
| 110
| 29.310345
| 0.843145
| 0.031765
| 0
| 0.233333
| 0
| 0
| 0.044899
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.133333
| 0.044444
| 0.377778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c4cf9fb1ad31300587d3e24030d9670ed150d3
| 3,342
|
py
|
Python
|
src/tornado-3.2.2/tornado/platform/common.py
|
code-annotator/tornado-annotated
|
78fa3ab3b87a559c1db9ec11d86d79f6bf47853c
|
[
"MIT"
] | 645
|
2015-01-03T02:03:59.000Z
|
2021-12-03T08:43:16.000Z
|
filenv/lib/python2.7/site-packages/tornado/platform/common.py
|
betoesquivel/fil2014
|
4c2e9188769096391bb206b76ed1ab8bd2ff66a1
|
[
"MIT"
] | 2
|
2021-04-30T20:29:40.000Z
|
2022-02-11T03:38:04.000Z
|
filenv/lib/python2.7/site-packages/tornado/platform/common.py
|
betoesquivel/fil2014
|
4c2e9188769096391bb206b76ed1ab8bd2ff66a1
|
[
"MIT"
] | 222
|
2015-01-07T05:00:52.000Z
|
2021-12-06T09:54:26.000Z
|
"""Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import socket
from tornado.platform import interface
class Waker(interface.Waker):
"""Create an OS independent asynchronous pipe.
For use on platforms that don't have os.pipe() (or where pipes cannot
be passed to select()), but do have sockets. This includes Windows
and Jython.
"""
def __init__(self):
# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
self.writer = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair
try:
self.writer.connect(connect_address)
break # success
except socket.error as detail:
if (not hasattr(errno, 'WSAEADDRINUSE') or
detail[0] != errno.WSAEADDRINUSE):
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.reader_fd = self.reader.fileno()
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.send(b"x")
except (IOError, socket.error):
pass
def consume(self):
try:
while True:
result = self.reader.recv(1024)
if not result:
break
except (IOError, socket.error):
pass
def close(self):
self.reader.close()
self.writer.close()
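A minimal usage sketch (not part of Tornado) of the Waker defined above, assuming the class is in scope: wake() makes the reader end readable, a select() call observes it, and consume() drains the byte so the pipe can be reused.
import select
waker = Waker()
waker.wake()                                   # send the 1-byte trigger
readable, _, _ = select.select([waker.fileno()], [], [], 1.0)
print(bool(readable))                          # True: the reader end woke up
waker.consume()                                # drain pending bytes
waker.close()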
| 36.326087
| 86
| 0.55775
| 415
| 3,342
| 4.450602
| 0.498795
| 0.043313
| 0.025988
| 0.030861
| 0.033568
| 0.033568
| 0
| 0
| 0
| 0
| 0
| 0.021296
| 0.35368
| 3,342
| 91
| 87
| 36.725275
| 0.833796
| 0.382406
| 0
| 0.269231
| 0
| 0
| 0.021361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0.038462
| 0.076923
| 0.038462
| 0.25
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c69927875c451378bcb7d50069e903036beefa
| 5,490
|
py
|
Python
|
bathymetry_blink/bathymetry_blink.py
|
poster515/BlinkyTape_Python
|
edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0
|
[
"MIT"
] | 26
|
2015-02-14T11:37:21.000Z
|
2021-05-10T17:24:16.000Z
|
bathymetry_blink/bathymetry_blink.py
|
poster515/BlinkyTape_Python
|
edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0
|
[
"MIT"
] | 8
|
2015-02-14T17:33:24.000Z
|
2021-10-05T20:32:19.000Z
|
bathymetry_blink/bathymetry_blink.py
|
poster515/BlinkyTape_Python
|
edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0
|
[
"MIT"
] | 15
|
2015-01-24T23:36:54.000Z
|
2021-10-02T23:40:08.000Z
|
"""
This script will modulate the blinky lights using the following algorithm:
1) uses user-provided location to obtain row of pixel data from bathy image
2) samples one pixel from that row for each LED on the strip
3) shifts the sampled row data to center it at the location specified by user
4) displays resulting pixels on Blinky Tape
5) shifts next row by a given latitude, also specified by user
6) sleeps for user-specified period of time
Uses the following arguments:
-l/--location: tuple
Location of the user in tuple(lat, lon). This represents the center of the LED strip. Defaults to (0, 0)
-u/--update-rate: int
Update interval of the script, in minutes. Defaults to 10.
-p/--port: str
Serial port of the BlinkyLight (e.g., 'ttyAMA0', 'COM3'). Defaults to 'COM5'.
-d/--delta-latitude: int
Vertical change in latitude applied at every update. May be 0, but that results in a display that never changes.
-i/--image: str
Name of the PNG image that contains the color-coded bathymetric data.
The file currently named mapserv.png was obtained using the following API:
https://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,180&format=image/png&height=600&width=1200&crs=EPSG:4326&layers=GEBCO_LATEST_SUB_ICE_TOPO&version=1.3.0
In lieu of providing command line arguments, you may alternatively edit the defaults in bath_config.json.
NOTE: runs via:
runfile('/BlinkyTape_Python/bathymetry_blink/bathymetry_blink.py', wdir='/BlinkyTape_Python/')
(C) 2021 Joseph Post (https://joeycodes.dev)
MIT Licensed
"""
import optparse
import json
from blinkytape import BlinkyTape
from time import sleep
from PIL import Image
import numpy as np
import sys
MAX_ERRORS = 3
num_errors = 0
# Obtain default parameters
with open("./bathymetry_blink/bathy_config.json") as f:
config = json.load(f)
# Default Blinky Tape port on Raspberry Pi is /dev/ttyACM0
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="portname",
help="serial port (ex: /dev/ttyACM0)", default=config["port"])
parser.add_option("-l", "--location", dest="location",
help="Location of the center of the LED strip (ex: 70,-110)", default=config["location"])
parser.add_option("-u", "--update-rate", dest="update_rate",
help="How often to update elevation profile (mins) (ex: 5)", default=config["update_rate"])
parser.add_option("-d", "--delta-latitude", dest="delta_latitude",
help="Change in latitude during update (ex: 5)", default=config["delta_latitude"])
parser.add_option("-n", "--num-leds", dest="num_leds",
help="Number of LEDs in strip (ex: 60)", default=config["num_leds"])
parser.add_option("-i", "--image", dest="image_name",
help="Name of the map/bathymetry image (ex: ./mapserv.png)", default=config["image"])
(options, args) = parser.parse_args()
if args:
print("Unknown parameters: " + args)
# grab the values provided by user (or defaults)
port = options.portname
loc = options.location
rate = options.update_rate
delta = options.delta_latitude
n_leds = options.num_leds
i_name = options.image_name
# Some visual indication that it works, for headless setups (green tape)
bt = BlinkyTape(port, n_leds)
bt.displayColor(0, 100, 0)
bt.show()
sleep(2)
while True:
try:
# first, load image
im = Image.open(i_name) # Can be many different formats.
cols, rows = im.size
a = np.asarray(im) # of shape (rows, cols, channels)
# map loc latitude to 0-based index
latitude_index = min(rows - 1, max(0, (int)(((loc[0] - -90) / (90 - -90)) * (rows - 0) + 0)))
longitude_index = min(cols - 1, max(0, (int)(((loc[1] - -180) / (180 - -180)) * (cols - 0) + 0)))
# update the location of the next row of elevation data to take
loc[0] += delta
loc[0] = ((loc[0] + 90) % 180) - 90 # wraps to next pole if overflow
print("Lat index: " + str(latitude_index))
print("Lon index: " + str(longitude_index))
print("Next latitude: " + str(loc[0]))
# grab the applicable pixel indices
indices = [(int)(x*(cols/n_leds)) for x in range(n_leds)]
# sample that row of pixel data
output_pixels = np.take(a[latitude_index], indices, axis=0)
# rotate the row to center around the specified longitude
output_pixels = np.roll(output_pixels, longitude_index, axis=0)
# send all pixel data to bt
for pixel in output_pixels:
print("Sending r: {}, g: {}, b: {}".format(*pixel))
bt.sendPixel(*pixel)
# finally, show the image
bt.show()
# delete variables for memory management
del a
del im
# Tape resets to stored pattern after a few seconds of inactivity
sleep(rate * 60) # Wait specified number of minutes
# sleep(10) # Wait specified number of minutes
except KeyboardInterrupt:
print("Keyboard interrupt, ending program.")
sys.exit()
except RuntimeError as e:
print("Encountered runtime error: " + e.args[0])
# flush any incomplete data
bt.show()
num_errors += 1
if num_errors > MAX_ERRORS:
sys.exit("Error count exceeds that allowed.")
| 36.845638
| 230
| 0.654098
| 786
| 5,490
| 4.496183
| 0.375318
| 0.012733
| 0.025467
| 0.007923
| 0.034522
| 0.01245
| 0
| 0
| 0
| 0
| 0
| 0.025142
| 0.232058
| 5,490
| 149
| 231
| 36.845638
| 0.813093
| 0.436976
| 0
| 0.044118
| 0
| 0
| 0.213035
| 0.011673
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.102941
| 0
| 0.102941
| 0.102941
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c6ca6076e88b29cf949f6ea50aa8a721054e5d
| 5,118
|
py
|
Python
|
service/transforms/export_submissions.py
|
SFDigitalServices/pts-dispatcher-microservice-py
|
80ec68d9d7f3f120a708717ed92c8b5a16742ff3
|
[
"MIT"
] | null | null | null |
service/transforms/export_submissions.py
|
SFDigitalServices/pts-dispatcher-microservice-py
|
80ec68d9d7f3f120a708717ed92c8b5a16742ff3
|
[
"MIT"
] | 4
|
2020-08-28T17:21:06.000Z
|
2021-06-02T01:52:16.000Z
|
service/transforms/export_submissions.py
|
SFDigitalServices/pts-dispatcher-microservice-py
|
80ec68d9d7f3f120a708717ed92c8b5a16742ff3
|
[
"MIT"
] | null | null | null |
""" Export Submissions Transform module """
#pylint: disable=too-few-public-methods
import pandas as pd
from .transform import TransformBase
from ..resources.field_configs import FieldConfigs
from ..resources.field_maps import FieldMaps
class ExportSubmissionsTransform(TransformBase):
""" Transform for Export Submissions """
def transform(self, data, sep):
"""
transform submissions from export
"""
output = list(map(self.get_data, data))
output = list(map(self.pretty_format, output))
output = [i for i in output if i is not None]
output = self.normalize(output)
output = self.to_csv(output, sep)
return output
# pylint: disable=R0201
def get_data(self, submission):
"""
Get data from submission object
"""
# skip permit type = existingPermitApplication submissions
#pylint: disable=too-many-nested-blocks
if submission['data']['permitType'] and submission['data']['permitType'] != 'existingPermitApplication':
output = {}
data = submission['data']
output['id'] = submission['_id']
output['created'] = submission['created']
#pylint: disable=too-many-nested-blocks
for key in data:
# flatten list values
if isinstance(data[key], list):
if len(data[key]) > 0:
if isinstance(data[key][0], (int, str)):
output[key] = ', '.join(map(str, data[key]))
else:
file_names = []
for index, val in enumerate(data[key]):
# if storage, concat filename
if 'storage' in val and 'originalName' in val:
file_names.append(val['originalName'])
else:
output[key+str(index+1)] = val
if len(file_names) > 0:
output[key] = ', '.join(file_names)
# flatten multi select values
elif isinstance(data[key], dict):
# building use code needs manual process
if FieldConfigs.is_building_use(key):
output[key] = self.convert_building_use(key, data[key], data)
# flatten nested address fields
elif FieldConfigs.is_nested_address_field(key):
output = self.convert_address_fields(key, data[key], output)
else:
multi_selects = []
for multi_key, multi_value in data[key].items():
if multi_value:
multi_selects.append(multi_key)
output[key] = ', '.join(multi_selects)
else:
output[key] = data[key]
return output
def normalize(self, data):
"""
Normalize data into a flat structure into DataFrame
"""
dataframe = pd.json_normalize(data)
# update column names
dataframe.rename(columns=self.pretty_string, inplace=True)
return dataframe
def to_csv(self, dataframe, sep=','):
"""
Return CSV from DataFrame
"""
return dataframe.to_csv(index=False, sep=sep, line_terminator='\r\n')
def pretty_format(self, data):
""" Pretty format data fields """
output = {}
if data:
data = self.set_pts_fields(data)
for key in data:
if self.datetime_valid(data[key]):
output[key] = self.pretty_time(data[key])
else:
field_key = FieldConfigs.get_field_key(key, 'map')
phone_appnum_key = FieldConfigs.get_field_key(key, 'pretty')
if field_key is not None:
output[key] = FieldMaps.map_key_value(field_key, data[key])
# manually add Fire Rating and proposed Fire Rating
if field_key == 'construction_type' and data[key] != '':
output = self.add_fire_rating(key, data[key], output)
# format phone numbers and building application number
elif phone_appnum_key is not None:
if phone_appnum_key == 'phone_fields':
output[key] = self.pretty_phonenumber(data[key])
# cleanse characters that break the csv
elif isinstance(data[key], (str, bytes)):
output[key] = data[key].replace('\n', '\t').replace('|', '')
# relabel field, if necessary
relabel_field = FieldConfigs.get_relabel_fields(key)
if relabel_field:
output[relabel_field] = output.pop(key)
output = self.reorder_fields(output)
return output
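A small illustration (not from the dispatcher service) of the flattening step that normalize() above relies on: pandas.json_normalize turns a list of nested dicts into a flat DataFrame whose columns can then be renamed and written out as CSV. The record fields are invented.
import pandas as pd
records = [
    {"id": "abc123", "applicant": {"firstName": "Ada", "phone": "5551234"}},
    {"id": "def456", "applicant": {"firstName": "Lin", "phone": "5555678"}},
]
frame = pd.json_normalize(records)
print(list(frame.columns))  # ['id', 'applicant.firstName', 'applicant.phone']
print(frame.to_csv(index=False, sep='|'))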
| 44.12069
| 112
| 0.520125
| 512
| 5,118
| 5.074219
| 0.279297
| 0.048499
| 0.023095
| 0.013087
| 0.046959
| 0.046959
| 0
| 0
| 0
| 0
| 0
| 0.002545
| 0.385893
| 5,118
| 115
| 113
| 44.504348
| 0.824053
| 0.150254
| 0
| 0.157895
| 0
| 0
| 0.038116
| 0.005919
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065789
| false
| 0
| 0.052632
| 0
| 0.197368
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c723ccb8662448fc572ef43b245239e373eaa3
| 2,877
|
py
|
Python
|
python/ray/ml/tests/test_torch_trainer.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
python/ray/ml/tests/test_torch_trainer.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
python/ray/ml/tests/test_torch_trainer.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
import pytest
import torch
import ray
from ray.ml.predictors.integrations.torch import TorchPredictor
from ray.ml.train.integrations.torch import TorchTrainer
from ray import train
from ray.ml.examples.pytorch.torch_linear_example import train_func as linear_train_func
@pytest.fixture
def ray_start_4_cpus():
address_info = ray.init(num_cpus=4)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.mark.parametrize("num_workers", [1, 2])
def test_torch_linear(ray_start_4_cpus, num_workers):
def train_func(config):
result = linear_train_func(config)
assert len(result) == epochs
assert result[-1]["loss"] < result[0]["loss"]
num_workers = num_workers
epochs = 3
scaling_config = {"num_workers": num_workers}
config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": epochs}
trainer = TorchTrainer(
train_loop_per_worker=train_func,
train_loop_config=config,
scaling_config=scaling_config,
)
trainer.fit()
def test_torch_e2e(ray_start_4_cpus):
def train_func():
model = torch.nn.Linear(1, 1)
train.save_checkpoint(model=model)
scaling_config = {"num_workers": 2}
trainer = TorchTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
predict_dataset = ray.data.range(3)
class TorchScorer:
def __init__(self):
self.pred = TorchPredictor.from_checkpoint(result.checkpoint)
def __call__(self, x):
return self.pred.predict(x, dtype=torch.float)
predictions = predict_dataset.map_batches(
TorchScorer, batch_format="pandas", compute="actors"
)
assert predictions.count() == 3
def test_torch_e2e_state_dict(ray_start_4_cpus):
def train_func():
model = torch.nn.Linear(1, 1).state_dict()
train.save_checkpoint(model=model)
scaling_config = {"num_workers": 2}
trainer = TorchTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
# If loading from a state dict, a model definition must be passed in.
with pytest.raises(ValueError):
TorchPredictor.from_checkpoint(result.checkpoint)
class TorchScorer:
def __init__(self):
self.pred = TorchPredictor.from_checkpoint(
result.checkpoint, model=torch.nn.Linear(1, 1)
)
def __call__(self, x):
return self.pred.predict(x, dtype=torch.float)
predict_dataset = ray.data.range(3)
predictions = predict_dataset.map_batches(
TorchScorer, batch_format="pandas", compute="actors"
)
assert predictions.count() == 3
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
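A minimal standalone illustration (not part of the Ray test suite) of the yield-fixture pattern used by ray_start_4_cpus above: everything after the yield acts as teardown and runs once the test using the fixture finishes.
import pytest
@pytest.fixture
def resource():
    handle = {"open": True}   # stand-in for ray.init()
    yield handle              # the test body runs here
    handle["open"] = False    # teardown, analogous to ray.shutdown()
def test_uses_resource(resource):
    assert resource["open"]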
| 28.77
| 88
| 0.681265
| 369
| 2,877
| 5.01897
| 0.281843
| 0.043737
| 0.019438
| 0.028078
| 0.534557
| 0.510799
| 0.470842
| 0.470842
| 0.446004
| 0.446004
| 0
| 0.012406
| 0.215502
| 2,877
| 99
| 89
| 29.060606
| 0.808152
| 0.041363
| 0
| 0.418919
| 0
| 0
| 0.042468
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.148649
| false
| 0
| 0.121622
| 0.027027
| 0.324324
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9c9b89785f6cfc7757c2e1d1d401d256c20d14f
| 2,567
|
py
|
Python
|
Pzzzzz/plugins/wm.py
|
Pzzzzz5142/animal-forest-QQ-group-bot
|
a9141a212a7746ac95d28459ec9cec5b6c188b35
|
[
"MIT"
] | 5
|
2020-05-28T06:29:33.000Z
|
2020-09-30T12:14:46.000Z
|
Pzzzzz/plugins/wm.py
|
Pzzzzz5142/xjbx-QQ-group-bot
|
a9141a212a7746ac95d28459ec9cec5b6c188b35
|
[
"MIT"
] | null | null | null |
Pzzzzz/plugins/wm.py
|
Pzzzzz5142/xjbx-QQ-group-bot
|
a9141a212a7746ac95d28459ec9cec5b6c188b35
|
[
"MIT"
] | null | null | null |
from nonebot import CommandSession, on_command
from langdetect import detect, detect_langs
from aiohttp import ClientSession
from nonebot import get_bot
from nonebot.argparse import ArgumentParser
import time
import hmac
import random, sys
import hashlib
import binascii
import urllib
bot = get_bot()
# Baidu general-purpose translation API; it does not include dictionary, TTS speech synthesis or other resources. For those, contact [email protected]
# coding=utf-8
import hashlib
import urllib
import random
@on_command("wm", aliases={"翻译", "translate"}, only_to_me=False)
async def wm(session: CommandSession):
session.get("token", prompt="请输入你想翻译的句子!")
myurl = "/api/trans/vip/translate"
q = session.state["token"]
fromLang = session.state["fr"]  # source language
toLang = session.state["to"]  # target language
salt = random.randint(32768, 65536)
sign = bot.config.BAIDUAPI + q + str(salt) + bot.config.BAIDUKey
sign = hashlib.md5(sign.encode()).hexdigest()
myurl = (
myurl
+ "?appid="
+ bot.config.BAIDUAPI
+ "&q="
+ urllib.parse.quote(q)
+ "&from="
+ fromLang
+ "&to="
+ toLang
+ "&salt="
+ str(salt)
+ "&sign="
+ sign
)
async with ClientSession() as sess:
async with sess.get("https://fanyi-api.baidu.com" + myurl) as resp:
if resp.status != 200:
pass
ShitAns = await resp.json()
try:
ans = [i["dst"] for i in ShitAns["trans_result"]]
ans = "\n".join(ans)
except:
session.finish("翻译错误,原因是:" + ShitAns["error_code"])
session.finish("翻译结果为:\n" + ans)
@wm.args_parser
async def _(session: CommandSession):
arg = session.current_arg_text.strip()
if session.is_first_run:
parser = ArgumentParser(session=session)
parser.add_argument("--fr", "-f", type=str, default="no")
parser.add_argument("--to", "-t", type=str, default="no")
parser.add_argument("token", type=str, default="", nargs="+")
argv = parser.parse_args(session.current_arg.split(" "))
arg = " ".join(argv.token)
if arg == "":
session.pause("输入不能为空哦!")
session.state["fr"] = detect(arg) if argv.fr == "no" else argv.fr
if session.state["fr"][:2] == "zh":
session.state["fr"] = "zh"
if argv.to == "no":
if session.state["fr"] == "zh":
session.state["to"] = "en"
else:
session.state["to"] = "zh"
else:
session.state["to"] = argv.to
if argv.fr == "no":
session.state["fr"] = "auto"
session.state["token"] = arg
| 27.021053
| 75
| 0.592131
| 314
| 2,567
| 4.773885
| 0.401274
| 0.096064
| 0.056037
| 0.024016
| 0.044029
| 0.044029
| 0.044029
| 0
| 0
| 0
| 0
| 0.008333
| 0.252045
| 2,567
| 94
| 76
| 27.308511
| 0.772396
| 0.031944
| 0
| 0.078947
| 0
| 0
| 0.095968
| 0.009677
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.013158
| 0.184211
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ca7d1ad949a33a39144490cd6ec4bc4a1910a2
| 5,375
|
py
|
Python
|
home/scripts/memory/lpsolve.py
|
ParksProjets/Mips-Applications
|
d4284a5ee357b0e5f348b9af28bb0d90c036ae99
|
[
"MIT"
] | 1
|
2019-01-08T08:41:22.000Z
|
2019-01-08T08:41:22.000Z
|
home/scripts/memory/lpsolve.py
|
ParksProjets/Mips-Applications
|
d4284a5ee357b0e5f348b9af28bb0d90c036ae99
|
[
"MIT"
] | null | null | null |
home/scripts/memory/lpsolve.py
|
ParksProjets/Mips-Applications
|
d4284a5ee357b0e5f348b9af28bb0d90c036ae99
|
[
"MIT"
] | null | null | null |
"""
LpSolve wrapper.
Copyright (C) 2018, Guillaume Gonnet
License MIT
"""
from ctypes import *
import sys
import os.path as path
import platform
# Import the DLL
ver = ("x86", "x64")[sys.maxsize > 2**32]
here = path.dirname(__file__)
if sys.platform == "win32":
lib = windll.LoadLibrary(path.abspath(path.join(here, "dll/lpsolve55-%s.dll" % ver)))
elif sys.platform == "linux":
lib = cdll.LoadLibrary(path.abspath(path.join(here, "dll/lpsolve55-%s.so" % ver)))
else:
raise ValueError("Can't load LpSolve library on this platform.")
# Make the bindings
c_double_p = POINTER(c_double)
c_int_p = POINTER(c_int)
lib.make_lp.argtypes = [c_int, c_int]
lib.make_lp.restype = c_void_p
lib.delete_lp.argtypes = [c_void_p]
lib.set_binary.argtypes = [c_void_p, c_int, c_ubyte]
lib.set_binary.restype = c_ubyte
lib.set_int.argtypes = [c_void_p, c_int, c_ubyte]
lib.set_int.restype = c_ubyte
lib.add_constraintex.argtypes = [c_void_p, c_int, c_double_p, c_int_p, c_int, c_double]
lib.add_constraintex.restype = c_ubyte
lib.set_obj_fnex.argtypes = [c_void_p, c_int, c_double_p, c_int_p]
lib.set_obj_fnex.restype = c_ubyte
lib.set_add_rowmode.argtypes = [c_void_p, c_ubyte]
lib.set_add_rowmode.restype = c_ubyte
lib.set_maxim.argtypes = [c_void_p]
lib.write_lp.argtypes = [c_void_p, c_char_p]
lib.write_lp.restype = c_ubyte
lib.set_verbose.argtypes = [c_void_p, c_int]
lib.solve.argtypes = [c_void_p]
lib.solve.restype = c_int
lib.get_variables.argtypes = [c_void_p, c_double_p]
lib.get_variables.restype = c_ubyte
class LpEngine(object):
"The Linear Programming Engine."
def __init__(self, maxvars, debug=False):
self.debug = debug
self.maxvars = maxvars
self.vars = []
self.lp = lib.make_lp(0, maxvars)
assert self.lp != 0, "Can't construct a new LpSolve model"
self.colbuff = (c_int * maxvars)()
self.rowbuff = (c_double * maxvars)()
lib.set_add_rowmode(self.lp, 1)
def __del__(self):
lib.delete_lp(self.lp)
def constraint(self, const):
"Add a new constraint into the model."
assert const.optype is not None, "You must provide the RHS of constraint"
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.add_constraintex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p),
cast(self.colbuff, c_int_p), const.optype, const.rhs)
assert ret == 1, "Can't add constraint into model"
def objective(self, const):
"Set the objective function."
lib.set_add_rowmode(self.lp, 0)
const.fill_buffers(self.colbuff, self.rowbuff)
ret = lib.set_obj_fnex(self.lp, len(const.vars), cast(self.rowbuff, c_double_p),
cast(self.colbuff, c_int_p))
assert ret == 1, "Can't set objective function of model"
def update_variables(self):
"Update the variable values."
ret = lib.get_variables(self.lp, cast(self.rowbuff, c_double_p))
assert ret == 1, "Can't get variable values"
for i, var in enumerate(self.vars):
var.value = self.rowbuff[i]
def solve(self):
"Solve the model."
lib.set_maxim(self.lp)
if self.debug:
lib.write_lp(self.lp, b"debug-model.lp")
else:
lib.set_verbose(self.lp, 3)
ret = lib.solve(self.lp)
if ret == 0 or ret == 1:
self.update_variables()
return ret
class LpVariable(object):
"A LpSolve variable."
def __init__(self, lp, vtype="real"):
assert len(lp.vars) < lp.maxvars, "Can't add a variable: maxvars limit reached"
self.index = len(lp.vars) + 1
self.value = None
self.lp = lp
lp.vars.append(self)
self.type = "real"
self.retype(vtype)
def retype(self, vtype):
"Change the type of the variable"
if "bin" in (self.type, vtype):
lib.set_binary(self.lp.lp, self.index, (vtype == "bin"))
elif "int" in (self.type, vtype):
lib.set_binary(self.lp.lp, self.index, (vtype == "int"))
def __rmul__(self, num):
return LpConstraint([num], [self])
def __add__(self, other):
if isinstance(other, LpConstraint):
return other.__add__(self)
return LpConstraint([1, 1], [self, other])
class LpConstraint(object):
"A LpSolve constraint."
def __init__(self, numbers, vars):
self.numbers = numbers
self.vars = vars
self.optype = None
self.rhs = None
def fill_buffers(self, colno, row):
"Fill colno and row buffers for calling LpSolve."
for i, (num, var) in enumerate(zip(self.numbers, self.vars)):
colno[i] = var.index
row[i] = num
def __add__(self, other):
if isinstance(other, LpVariable):
return LpConstraint(self.numbers + [1], self.vars + [other])
else:
c = LpConstraint(self.numbers + other.numbers, self.vars + other.vars)
assert len(c.vars) == len(set(c.vars)), "Some variables appear several times"
return c
def __le__(self, val):
self.optype, self.rhs = (1, val)
return self
def __eq__(self, val):
self.optype, self.rhs = (3, val)
return self
def __ge__(self, val):
self.optype, self.rhs = (2, val)
return self
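# A minimal usage sketch of the operator-overloading DSL defined above (hypothetical
# data; requires the bundled lpsolve55 library to load successfully):
#
#   lp = LpEngine(maxvars=2)
#   x = LpVariable(lp, "int")
#   y = LpVariable(lp, "int")
#   lp.constraint(2 * x + 1 * y <= 10)    # LpConstraint built via __rmul__/__add__/__le__
#   lp.objective(3 * x + 2 * y)           # maximised, since solve() calls set_maxim
#   if lp.solve() in (0, 1):              # return codes accepted by solve() above
#       print(x.value, y.value)           # filled in by update_variables()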
| 25.116822
| 92
| 0.630884
| 790
| 5,375
| 4.083544
| 0.201266
| 0.031618
| 0.022319
| 0.047737
| 0.32734
| 0.253255
| 0.182269
| 0.16243
| 0.16243
| 0.106014
| 0
| 0.008387
| 0.245767
| 5,375
| 213
| 93
| 25.234742
| 0.787371
| 0.067535
| 0
| 0.077519
| 0
| 0
| 0.115881
| 0
| 0
| 0
| 0
| 0
| 0.054264
| 1
| 0.124031
| false
| 0
| 0.031008
| 0.007752
| 0.248062
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cb7d0cdfc5b919d86c41747507b434bce2ff4e
| 2,595
|
py
|
Python
|
scripts/charts.py
|
yshrdbrn/bigdata
|
51114ae98354ee094e0bcff26c1814f85c434148
|
[
"MIT"
] | null | null | null |
scripts/charts.py
|
yshrdbrn/bigdata
|
51114ae98354ee094e0bcff26c1814f85c434148
|
[
"MIT"
] | 1
|
2020-02-01T04:53:43.000Z
|
2020-02-01T04:53:43.000Z
|
scripts/charts.py
|
yshrdbrn/bigdata
|
51114ae98354ee094e0bcff26c1814f85c434148
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import pandas as pd
def group_by_category(df):
grouped = df.groupby(['CATEGORY']).size().to_frame('Crimes')
labels = ['Trespassing', 'Vehicle theft', 'General Theft',
'Damage to Property', 'Robbery', 'Homicide']
p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Category')
p.get_legend().remove()
plt.savefig('../charts/category.png')
def group_by_time_of_day(df):
grouped = df.groupby(['TIME_OF_DAY']).size().to_frame('Crimes')
p = grouped.plot.pie(y='Crimes', labels=['Day', 'Evening', 'Night'], autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Time of Day')
p.get_legend().remove()
plt.savefig('../charts/time_of_day.png')
def group_by_day_of_the_week(df):
grouped = df.groupby(['DAY_OF_THE_WEEK']).size().to_frame('Crimes')
labels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Day of The Week')
p.get_legend().remove()
plt.savefig('../charts/day_of_the_week.png')
def group_by_month(df):
grouped = df.groupby(['MONTH']).size().to_frame('Size')
grouped['Percentage'] = 100 * grouped['Size'] / len(df)
grouped = grouped.drop(columns='Size')
p = grouped.plot.bar()
p.set_title('Crimes Percentage Grouped By Month')
p.set_ylabel('Percentage of Crimes')
p.set_xlabel('Month')
p.get_legend().remove()
plt.savefig('../charts/month.png')
def group_by_year(df):
grouped = df.groupby(['YEAR']).size().to_frame('Crimes')
p = grouped.plot.pie(y='Crimes', autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Year')
p.get_legend().remove()
plt.savefig('../charts/year.png')
def group_by_territory(df):
grouped = df.groupby(['PDQ']).size().to_frame('Size')
grouped['Percentage'] = 100 * grouped['Size'] / len(df)
grouped = grouped.drop(columns='Size')
grouped.index = grouped.index.astype(int)
p = grouped.plot.bar()
p.set_title('Crimes Percentage Grouped By Territory')
p.set_ylabel('Percentage of Crimes')
p.set_xlabel('Territory Number')
p.get_legend().remove()
plt.savefig('../charts/territory.png')
if __name__ == '__main__':
df = pd.read_csv('../data/crimes_dataset_processed_incomplete.csv')
group_by_territory(df)
group_by_year(df)
group_by_month(df)
group_by_time_of_day(df)
group_by_day_of_the_week(df)
group_by_category(df)
| 38.161765
| 91
| 0.668208
| 373
| 2,595
| 4.439678
| 0.217158
| 0.050725
| 0.036232
| 0.065217
| 0.597826
| 0.562802
| 0.541063
| 0.396135
| 0.396135
| 0.350242
| 0
| 0.006358
| 0.151445
| 2,595
| 67
| 92
| 38.731343
| 0.745686
| 0
| 0
| 0.275862
| 0
| 0
| 0.300193
| 0.056262
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0.017241
| 0.034483
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cc56ba272dad2f5e9b82b388ad10350a722906
| 15,349
|
py
|
Python
|
unittests.py
|
benjaminkrenn/abcvoting
|
1e3833a7314d3467de7560f7e531a4c35c6eda08
|
[
"MIT"
] | null | null | null |
unittests.py
|
benjaminkrenn/abcvoting
|
1e3833a7314d3467de7560f7e531a4c35c6eda08
|
[
"MIT"
] | null | null | null |
unittests.py
|
benjaminkrenn/abcvoting
|
1e3833a7314d3467de7560f7e531a4c35c6eda08
|
[
"MIT"
] | null | null | null |
# Unit tests
import unittest
def run_test_instance(unittestinstance, profile, committeesize, tests):
import rules_approval
# all rules used?
for rule in rules_approval.MWRULES:
unittestinstance.assertTrue(rule in tests.keys())
for rule in tests.keys():
output = rules_approval.compute_rule(rule, profile,
committeesize,
resolute=False)
unittestinstance.assertEqual(
output, tests[rule], msg=rules_approval.MWRULES[rule] + " failed")
output = rules_approval.compute_rule(
rule, profile, committeesize, resolute=True)
unittestinstance.assertEqual(
len(output), 1,
msg=rules_approval.MWRULES[rule] + " failed with resolute=True")
unittestinstance.assertTrue(
output[0] in tests[rule],
msg=rules_approval.MWRULES[rule] + " failed with resolute=True")
class TestApprovalMultiwinner(unittest.TestCase):
def test_createprofiles(self):
from preferences import Profile
from preferences import DichotomousPreferences
num_cand = 7
prof = Profile(num_cand)
self.assertEqual(prof.add_preferences(
DichotomousPreferences([0, 4, 5])),
None)
with self.assertRaises(Exception):
prof.add_preferences(DichotomousPreferences([num_cand]))
with self.assertRaises(Exception):
prof.add_preferences(DichotomousPreferences([-1]))
self.assertEqual(prof.add_preferences([0, 4, 5]), None)
with self.assertRaises(Exception):
prof.add_preferences([0, 4, 5, "1"])
with self.assertRaises(Exception):
prof.add_preferences(["1", 0, 4, 5])
p1 = DichotomousPreferences([0, 4, 5])
p2 = DichotomousPreferences([1, 2])
self.assertEqual(prof.add_preferences([p1, p2]), None)
self.assertTrue(prof.has_unit_weights())
prof.add_preferences(DichotomousPreferences([0, 4, 5], 2.4))
self.assertFalse(prof.has_unit_weights())
self.assertEqual(prof.totalweight(), 6.4)
def test_mwrules__toofewcandidates(self):
from preferences import Profile
import rules_approval
profile = Profile(5)
committeesize = 4
preflist = [[0, 1, 2], [1], [1, 2], [0]]
profile.add_preferences(preflist)
for rule in rules_approval.MWRULES.keys():
with self.assertRaises(Exception):
rules_approval.compute_rule(rule, profile, committeesize)
with self.assertRaises(Exception):
rules_approval.compute_rule(rule, profile,
committeesize, resolute=True)
def test_mwrules_weightsconsidered(self):
from preferences import Profile
from preferences import DichotomousPreferences
import rules_approval
self.longMessage = True
profile = Profile(3)
profile.add_preferences(DichotomousPreferences([0]))
profile.add_preferences(DichotomousPreferences([0]))
profile.add_preferences(DichotomousPreferences([1], 5))
profile.add_preferences(DichotomousPreferences([0]))
committeesize = 1
for rule in rules_approval.MWRULES.keys():
if "monroe" in rule or "rule-x" in rule:
# Monroe and rule x only work with unit weights:
continue
result = rules_approval.compute_rule(rule, profile, committeesize)
self.assertTrue([1] in result,
msg=rule + " failed"+str(result))
def test_mwrules_correct_simple(self):
from preferences import Profile
import rules_approval
self.longMessage = True
profile = Profile(4)
profile.add_preferences([[0], [1], [2], [3]])
committeesize = 2
for rule in rules_approval.MWRULES.keys():
if rule == "greedy-monroe": # always returns one committee
continue
self.assertEqual(len(rules_approval.compute_rule(rule, profile,
committeesize)),
6, msg=rule + " failed")
for rule in rules_approval.MWRULES.keys():
self.assertEqual(len(rules_approval.compute_rule(rule, profile,
committeesize,
resolute=True)),
1, msg=rule + " failed with resolute=True")
def test_monroe_indivisible(self):
from preferences import Profile
import rules_approval
self.longMessage = True
profile = Profile(4)
profile.add_preferences([[0], [0], [0], [1, 2], [1, 2], [1], [3]])
committeesize = 3
for ilp in [True, False]:
# max Monroe score is 6 (even for committee [0, 1, 3])
self.assertEqual(
rules_approval.compute_monroe(profile, committeesize,
ilp=ilp, resolute=False),
[[0, 1, 2], [0, 1, 3], [0, 2, 3]])
# this test shows that tiebreaking is not (yet)
# implemented for opt-Phragmen
def test_optphrag_notiebreaking(self):
from preferences import Profile
from rules_approval import compute_rule
self.longMessage = True
profile = Profile(6)
profile.add_preferences([[0], [0], [1, 3], [1, 3], [1, 4],
[2, 4], [2, 5], [2, 5]])
committeesize = 3
self.assertEqual(
len(compute_rule("optphrag", profile, committeesize,
resolute=False)),
12)
def test_mwrules_correct_advanced_1(self):
from preferences import Profile
self.longMessage = True
committeesize = 4
profile = Profile(6)
preflist = [[0, 4, 5], [0], [1, 4, 5], [1],
[2, 4, 5], [2], [3, 4, 5], [3]]
profile.add_preferences(preflist)
tests1 = {
"seqpav": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"av": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"sav": [[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5], [0, 1, 3, 4],
[0, 1, 3, 5], [0, 1, 4, 5], [0, 2, 3, 4], [0, 2, 3, 5],
[0, 2, 4, 5], [0, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"pav-ilp": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"pav-noilp": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"revseqpav": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"minimaxav-noilp": [[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5], [0, 1, 4, 5],
[0, 2, 3, 4], [0, 2, 3, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"minimaxav-ilp": [[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5], [0, 1, 4, 5],
[0, 2, 3, 4], [0, 2, 3, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"phrag": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"optphrag": [[0, 1, 2, 3]],
"cc-ilp": [[0, 1, 2, 3]],
"cc-noilp": [[0, 1, 2, 3]],
"seqcc": [[0, 1, 2, 4], [0, 1, 2, 5], [0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5], [1, 2, 3, 4], [1, 2, 3, 5]],
"revseqcc": [[0, 1, 2, 3]],
"monroe-ilp": [[0, 1, 2, 3]],
"monroe-noilp": [[0, 1, 2, 3]],
"greedy-monroe": [[0, 2, 3, 4]],
"slav-ilp": [[0, 1, 2, 3],
[0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5],
[1, 2, 3, 4], [1, 2, 3, 5]],
"slav-noilp": [[0, 1, 2, 3],
[0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5],
[1, 2, 3, 4], [1, 2, 3, 5]],
"seqslav": [[0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5],
[1, 2, 3, 4], [1, 2, 3, 5]],
"rule-x": [[0, 1, 4, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 4, 5],
[1, 3, 4, 5], [2, 3, 4, 5]],
"phragmen-enestroem": [[0, 1, 4, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 4, 5],
[1, 3, 4, 5], [2, 3, 4, 5]],
}
run_test_instance(self, profile, committeesize, tests1)
# and now with reversed preflist
preflist.reverse()
for p in preflist:
p.reverse()
profile = Profile(6)
profile.add_preferences(preflist)
run_test_instance(self, profile, committeesize, tests1)
def test_mwrules_correct_advanced_2(self):
from preferences import Profile
self.longMessage = True
# and another profile
profile = Profile(5)
committeesize = 3
preflist = [[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2],
[0, 1, 2], [0, 1], [3, 4], [3, 4], [3]]
profile.add_preferences(preflist)
tests2 = {
"seqpav": [[0, 1, 3]],
"av": [[0, 1, 2]],
"sav": [[0, 1, 3]],
"pav-ilp": [[0, 1, 3]],
"pav-noilp": [[0, 1, 3]],
"revseqpav": [[0, 1, 3]],
"minimaxav-noilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"minimaxav-ilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"phrag": [[0, 1, 3]],
"optphrag": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"cc-ilp": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"cc-noilp": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"seqcc": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"revseqcc": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"monroe-ilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"monroe-noilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"greedy-monroe": [[0, 1, 3]],
"seqslav": [[0, 1, 3]],
"slav-ilp": [[0, 1, 3]],
"slav-noilp": [[0, 1, 3]],
"rule-x": [[0, 1, 3]],
"phragmen-enestroem": [[0, 1, 3]],
}
run_test_instance(self, profile, committeesize, tests2)
def test_mwrules_correct_advanced_3(self):
from preferences import Profile
self.longMessage = True
# and a third profile
profile = Profile(6)
committeesize = 4
preflist = [[0, 3, 4, 5], [1, 2], [0, 2, 5], [2],
[0, 1, 2, 3, 4], [0, 3, 4], [0, 2, 4], [0, 1]]
profile.add_preferences(preflist)
tests3 = {
"seqpav": [[0, 1, 2, 4]],
"av": [[0, 1, 2, 4], [0, 2, 3, 4]],
"sav": [[0, 1, 2, 4]],
"pav-ilp": [[0, 1, 2, 4]],
"pav-noilp": [[0, 1, 2, 4]],
"revseqpav": [[0, 1, 2, 4]],
"minimaxav-noilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 2, 3, 4], [0, 2, 3, 5],
[0, 2, 4, 5]],
"minimaxav-ilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 2, 3, 4], [0, 2, 3, 5],
[0, 2, 4, 5]],
"phrag": [[0, 1, 2, 4]],
"optphrag": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"cc-ilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"cc-noilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"seqcc": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5]],
"revseqcc": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"monroe-ilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"monroe-noilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"greedy-monroe": [[0, 1, 2, 3]],
"seqslav": [[0, 1, 2, 4]],
"slav-ilp": [[0, 1, 2, 4]],
"slav-noilp": [[0, 1, 2, 4]],
"rule-x": [[0, 1, 2, 4]],
"phragmen-enestroem": [[0, 1, 2, 4]],
}
run_test_instance(self, profile, committeesize, tests3)
def test_monroescore(self):
from preferences import Profile
from score_functions import monroescore_flowbased, monroescore_matching
self.longMessage = True
# and a third profile
profile = Profile(6)
preflist = [[0, 1], [1], [1, 3], [4], [2], [1, 5, 3]]
profile.add_preferences(preflist)
self.assertEqual(monroescore_flowbased(profile, [1, 3, 2]), 5)
self.assertEqual(monroescore_matching(profile, [1, 3, 2]), 5)
self.assertEqual(monroescore_flowbased(profile, [2, 1, 5]), 4)
self.assertEqual(monroescore_matching(profile, [2, 1, 5]), 4)
self.assertEqual(monroescore_flowbased(profile, [2, 4, 5]), 3)
self.assertEqual(monroescore_matching(profile, [2, 5, 4]), 3)
if __name__ == '__main__':
unittest.main()
| 42.167582
| 79
| 0.410906
| 2,000
| 15,349
| 3.097
| 0.065
| 0.042299
| 0.035841
| 0.018082
| 0.67969
| 0.608492
| 0.524217
| 0.463513
| 0.379238
| 0.278172
| 0
| 0.135299
| 0.407714
| 15,349
| 363
| 80
| 42.283747
| 0.546035
| 0.020913
| 0
| 0.448845
| 0
| 0
| 0.047017
| 0
| 0
| 0
| 0
| 0
| 0.089109
| 1
| 0.036304
| false
| 0
| 0.066007
| 0
| 0.105611
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cc99c89bae7a8c33f8aa618bc77a5eebb78e7c
| 7,638
|
py
|
Python
|
Robustness Check/Calculating Risk Factors/calculate_momentum_factor.py
|
behnoud-bazrafshan/ThesisPortfolio
|
2edda0109fb8aafc984b5dfc2e59cabb949b4a78
|
[
"MIT"
] | null | null | null |
Robustness Check/Calculating Risk Factors/calculate_momentum_factor.py
|
behnoud-bazrafshan/ThesisPortfolio
|
2edda0109fb8aafc984b5dfc2e59cabb949b4a78
|
[
"MIT"
] | null | null | null |
Robustness Check/Calculating Risk Factors/calculate_momentum_factor.py
|
behnoud-bazrafshan/ThesisPortfolio
|
2edda0109fb8aafc984b5dfc2e59cabb949b4a78
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import jdatetime
pd.options.mode.chained_assignment = None
# Read Bourseview data for market cap
# Concat all 75 tickers' data
me_list = []
for file_number in range(1, 76):
print(file_number)
me_path = f'E:/Thesis/New Sampling/Daily Data - Bourseview/'\
f'{file_number}.xlsx'
me_df = pd.read_excel(
me_path,
skiprows=7,
usecols=[2, 3, 11],
names=['date', 'open', 'market_cap'],
na_values='-'
)
# Change order from old to new dates
me_df = me_df[::-1].reset_index(drop=True)
me_df['date'] = me_df['date'].str.replace('-', '')
# Delete non-traded days
me_df.dropna(subset=['open'], inplace=True)
me_df.drop(columns='open', inplace=True)
# Create monthly dataframe
me_df = me_df.groupby(me_df['date'].str[:6]).last()
me_df = me_df.drop(columns=['date']).reset_index()
me_df.insert(1, 'ticker_num', file_number)
me_list.append(me_df)
me_df = pd.concat(me_list, ignore_index=True)
me_df = me_df.loc[(me_df['date'] >= '139212') & (me_df['date'] <= '139900')]
me_df.reset_index(drop=True, inplace=True)
# Read rahavard 365 data for calculating returns
close_list = []
for file_number in range(1, 76):
rahavard_path = f'E:/Thesis/New Sampling/Daily Data - Rahavard 365/'\
f'{file_number}.txt'
df = pd.read_csv(
rahavard_path,
usecols=[2, 7],
names=['date', 'close'],
header=0,
dtype={'date': str},
parse_dates=[0]
)
# Work around an index-reading quirk: pandas adds two index columns to the df
df.reset_index(drop=True, inplace=True)
# Convert to shamsi dates
df['date'] = df['date'].apply(
lambda x: jdatetime.date.fromgregorian(date=x).strftime('%Y%m%d')
)
# Create monthly dataframe
df = df.groupby(df['date'].str[:6]).last()
df = df.drop(columns=['date']).reset_index()
df.insert(1, 'ticker_num', file_number)
df['monthly_return'] = df['close'].pct_change()
close_list.append(df)
df = pd.concat(close_list, ignore_index=True)
df = df.loc[(df['date'] >= '139212') & (df['date'] <= '139900')]
# Read index df for indicating open market days
index_path = r'E:\Thesis\New Sampling\TEDPIX\شاخص كل6.xls'
index_df = pd.read_excel(
index_path,
usecols=[1],
names=['date'],
dtype={'date': str}
)
index_df.dropna(inplace=True)
# The list of all months
months = index_df['date'].str[:6].unique().tolist()
# The list of months that we need for calculating market cap
me_months = [
'139312', '139401', '139402', '139403', '139404', '139405', '139406',
'139407', '139408', '139409', '139410', '139411', '139412', '139501',
'139502', '139503', '139504', '139505', '139506', '139507', '139508',
'139509', '139510', '139511', '139512', '139601', '139602', '139603',
'139604', '139605', '139606', '139607', '139608', '139609', '139610',
'139611', '139612', '139701', '139702', '139703', '139704', '139705',
'139706', '139707', '139708', '139709', '139710', '139711', '139712',
'139801', '139802', '139803', '139804', '139805', '139806', '139807',
'139808', '139809', '139810', '139811', '139812'
]
# The list of months that we need for calculating MOM
mom_months = me_months[1:]
# Merge market cap and price dfs
merged_df = pd.merge(df, me_df, on=['ticker_num', 'date'])
# First, create a NaN column, and then add t-13 prices
merged_df.insert(5, 't-13 price', np.nan)
for month in mom_months:
# Find t-13 prices
for ticker in range(1, 76):
t_13 = months[months.index(month) - 13]
t_13_condtion = (merged_df['date'] == t_13)
ticker_condition = (merged_df['ticker_num'] == ticker)
try:
t_13_price = merged_df.loc[
t_13_condtion
& ticker_condition
]['close'].values[0]
previous_month = me_months[me_months.index(month) - 1]
t_1_condtion = (merged_df['date'] == previous_month)
merged_df.loc[
(t_1_condtion & ticker_condition), 't-13 price'
] = t_13_price
except:
pass
# Calculate last 12 months return for month t (t-1, t-12)
merged_df['past_year_return'] = (
(merged_df['close'] / merged_df['t-13 price'])
- 1
)
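# Worked example (hypothetical numbers): if the month t-1 close is 120 and the
# t-13 price is 100, past_year_return = 120 / 100 - 1 = 0.20, i.e. a 20% gain over
# the t-12 .. t-1 window used to rank momentum.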
mom_list = []
for month in mom_months:
# Check t-13 price condition and t-1 market cap condition
previous_month = months[months.index(month) - 1]
me_condition = (merged_df['date'] == previous_month)
mom_condition = (merged_df['past_year_return'].notna())
portfo_const_df = merged_df.loc[me_condition & mom_condition]
# Split each month ME into two groups
conditions = [
(
portfo_const_df['market_cap']
> portfo_const_df['market_cap'].median()
),
(
portfo_const_df['market_cap']
<= portfo_const_df['market_cap'].median()
)
]
portfolio_size = np.select(conditions, ['B', 'S']).tolist()
portfo_const_df.insert(6, 'size', portfolio_size)
# Split each ME portfolio into 3 MOM groups
q = [0, .3, .7, 1]
labels = ['L', 'M', 'H']
x_b = portfo_const_df.loc[
portfo_const_df['size'] == 'B'
]['past_year_return']
b_mom = pd.qcut(x=x_b, q=q, labels=labels).to_dict()
x_s = portfo_const_df.loc[
portfo_const_df['size'] == 'S'
]['past_year_return']
s_mom = pd.qcut(x=x_s, q=q, labels=labels).to_dict()
portfo_const_df['mom'] = pd.Series(b_mom)
portfo_const_df['mom'].update(pd.Series(s_mom))
# Extract portfolio ticker numbers
portfo_const_df['portfolio'] = (
portfo_const_df['size'] + portfo_const_df['mom']
)
bh = portfo_const_df.loc[
portfo_const_df['portfolio'] == 'BH'
]['ticker_num'].tolist()
bl = portfo_const_df.loc[
portfo_const_df['portfolio'] == 'BL'
]['ticker_num'].tolist()
sh = portfo_const_df.loc[
portfo_const_df['portfolio'] == 'SH'
]['ticker_num'].tolist()
sl = portfo_const_df.loc[
portfo_const_df['portfolio'] == 'SL'
]['ticker_num'].tolist()
# Calculating value-weighted return for each portfolio in month t
# Set conditions
month_condition = (merged_df['date'] == month)
bh_condition = merged_df['ticker_num'].isin(bh)
bl_condition = merged_df['ticker_num'].isin(bl)
sh_condition = merged_df['ticker_num'].isin(sh)
sl_condition = merged_df['ticker_num'].isin(sl)
# Construct portfolios
bh_portfolio = merged_df.loc[month_condition & bh_condition]
bl_portfolio = merged_df.loc[month_condition & bl_condition]
sh_portfolio = merged_df.loc[month_condition & sh_condition]
sl_portfolio = merged_df.loc[month_condition & sl_condition]
# Calculate value-weighted returns
bh_return = np.average(
bh_portfolio.monthly_return,
weights=bh_portfolio.market_cap
)
bl_return = np.average(
bl_portfolio.monthly_return,
weights=bl_portfolio.market_cap
)
sh_return = np.average(
sh_portfolio.monthly_return,
weights=sh_portfolio.market_cap
)
sl_return = np.average(
sl_portfolio.monthly_return,
weights=sl_portfolio.market_cap
)
# Calculate MOM, and add it to a list
mom = (
((sh_return + bh_return) / 2)
- ((sl_return + bl_return) / 2)
)
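# Worked example (hypothetical returns): sh_return=0.03, bh_return=0.05,
# sl_return=0.01, bl_return=-0.01 gives mom = (0.03+0.05)/2 - (0.01-0.01)/2 = 0.04,
# i.e. high-momentum portfolios outperform low-momentum ones by 4% that month.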
mom_list.append(mom)
mom_df = pd.Series(mom_list).to_excel('mom.xlsx')
| 38.38191
| 77
| 0.612857
| 1,040
| 7,638
| 4.269231
| 0.247115
| 0.056982
| 0.067342
| 0.021622
| 0.263063
| 0.216216
| 0.137387
| 0.110811
| 0.022523
| 0.022523
| 0
| 0.081327
| 0.238544
| 7,638
| 198
| 78
| 38.575758
| 0.682084
| 0.127258
| 0
| 0.070588
| 0
| 0
| 0.165502
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.005882
| 0.017647
| 0
| 0.017647
| 0.005882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cdaf9a83cf7f7590823c87b5b4ab6e714294e0
| 4,632
|
py
|
Python
|
source/lambda/geoip_downloader/index.py
|
aws-samples/siem-on-amazon-opensearch-service
|
9bac87d39e9fab04f483bae54ffe94948af096ff
|
[
"MIT-0"
] | 92
|
2021-09-14T06:41:06.000Z
|
2022-03-31T09:52:07.000Z
|
source/lambda/geoip_downloader/index.py
|
aws-samples/siem-on-amazon-opensearch-service
|
9bac87d39e9fab04f483bae54ffe94948af096ff
|
[
"MIT-0"
] | 74
|
2021-09-18T01:46:47.000Z
|
2022-03-28T10:46:59.000Z
|
source/lambda/geoip_downloader/index.py
|
aws-samples/siem-on-amazon-opensearch-service
|
9bac87d39e9fab04f483bae54ffe94948af096ff
|
[
"MIT-0"
] | 42
|
2021-09-16T23:00:00.000Z
|
2022-03-29T15:11:43.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
__copyright__ = ('Copyright Amazon.com, Inc. or its affiliates. '
'All Rights Reserved.')
__version__ = '2.7.1'
__license__ = 'MIT-0'
__author__ = 'Akihiro Nakajima'
__url__ = 'https://github.com/aws-samples/siem-on-amazon-opensearch-service'
import hashlib
import json
import os
import tarfile
import urllib.error
import urllib.parse
import urllib.request
import boto3
# get var from lambda environment
try:
s3bucket_name = os.environ['s3bucket_name']
license_key = os.environ['license_key']
except KeyError:
raise Exception('ERROR: impossible to get lambda environment')
s3key_prefix = os.environ.get('s3key_prefix', 'GeoLite2/')
s3 = boto3.resource('s3')
bucket = s3.Bucket(s3bucket_name)
url = 'https://download.maxmind.com/app/geoip_download?'
put_files = ['GeoLite2-City', 'GeoLite2-ASN', 'GeoLite2-Country']
def download_file(filename):
for suffix in ['tar.gz', 'tar.gz.sha256']:
values = {'edition_id': filename, 'license_key': license_key,
'suffix': suffix}
data = urllib.parse.urlencode(values)
try:
urllib.request.urlretrieve(
url + data, filename='/tmp/' + filename + '.' + suffix)
except urllib.error.HTTPError as err:
if err.status == 401:
return err.status
print(err)
raise Exception('ERROR: http error')
except Exception as err:
print(err)
raise Exception('ERROR: ' + str(err))
print('INFO: ' + filename + ' was downloaded')
return 200
def put_to_s3(filename):
with open('/tmp/' + filename + '.tar.gz.sha256') as f:
checksum = f.read().split()[0]
print('INFO: Checksum: ' + checksum)
with open('/tmp/' + filename + '.tar.gz', 'rb') as f:
calcurated_checksum = hashlib.sha256(f.read()).hexdigest()
if checksum not in calcurated_checksum:
print('ERROR: checksum mismatch; download failed')
return False
with tarfile.open('/tmp/' + filename + '.tar.gz', 'r:gz') as tf:
directory = tf.getmembers()[0].name
tf.extractall(path='/tmp/')
mmdb = directory + '/' + filename + '.mmdb'
s3obj = s3key_prefix + filename + '.mmdb'
bucket.upload_file('/tmp/' + mmdb, s3obj)
print('INFO: uploaded {0} to s3://{1}/{2}'.format(
mmdb, s3bucket_name, s3obj))
def send(event, context, responseStatus, responseData, physicalResourceId=None,
noEcho=False):
# https://docs.aws.amazon.com/ja_jp/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html
responseUrl = event['ResponseURL']
print(responseUrl)
response_body = {}
response_body['Status'] = responseStatus
response_body['Reason'] = ('See the details in CloudWatch Log Stream: '
'' + context.log_stream_name)
response_body['PhysicalResourceId'] = (
physicalResourceId or context.log_stream_name)
response_body['StackId'] = event['StackId']
response_body['RequestId'] = event['RequestId']
response_body['LogicalResourceId'] = event['LogicalResourceId']
response_body['NoEcho'] = noEcho
response_body['Data'] = responseData
json_response_body = json.dumps(response_body)
print('Response body:\n' + json_response_body)
headers = {'content-type': 'application/json', }
req = urllib.request.Request(
event['ResponseURL'], json_response_body.encode(),
headers=headers, method='PUT')
try:
res = urllib.request.urlopen(req)
print('Status code: ' + str(res.status))
except Exception as e:
print('send(..) failed executing requests.put(..): ' + str(e))
def lambda_handler(event, context):
physicalResourceId = 'geoipdb'
status = 'None'
if event:
print(json.dumps(event))
try:
for filename in put_files:
status = download_file(filename)
if status == 401:
break
put_to_s3(filename)
except Exception as e:
print(e)
if event and 'RequestType' in event:
response = {'failed_reason': e}
send(event, context, 'FAILED', response, physicalResourceId)
if event and 'RequestType' in event:
if status == 401:
response = {'status': 'invalide_license_key'}
else:
response = {'status': 'downloaded'}
send(event, context, 'SUCCESS', response, physicalResourceId)
return(json.dumps(response))
| 34.827068
| 122
| 0.633636
| 527
| 4,632
| 5.43833
| 0.343454
| 0.058618
| 0.019888
| 0.018842
| 0.137474
| 0.095604
| 0.036985
| 0.036985
| 0.036985
| 0.036985
| 0
| 0.01498
| 0.236183
| 4,632
| 132
| 123
| 35.090909
| 0.795082
| 0.053109
| 0
| 0.111111
| 0
| 0
| 0.222552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.074074
| 0
| 0.138889
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cdbec7cf44be7c5e8dcf70bed770879dcd7e21
| 16,679
|
py
|
Python
|
components/mroipac/baseline/Baseline.py
|
earthobservatory/isce2
|
655c46cc4add275879167b750a5e91f6d00f168e
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-10-06T12:21:02.000Z
|
2019-10-06T12:21:02.000Z
|
components/mroipac/baseline/Baseline.py
|
earthobservatory/isce2
|
655c46cc4add275879167b750a5e91f6d00f168e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
components/mroipac/baseline/Baseline.py
|
earthobservatory/isce2
|
655c46cc4add275879167b750a5e91f6d00f168e
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-06-24T20:20:18.000Z
|
2021-06-24T20:32:23.000Z
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2010 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: Giangi Sacco
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import math
import datetime
import logging
from iscesys.Component.Component import Component, Port
from isceobj.Util.mathModule import MathModule as MM
from isceobj.Orbit.Orbit import StateVector
# A class to hold three-dimensional basis vectors
class Basis(object):
def __init__(self):
self.x1 = []
self.x2 = []
self.x3 = []
# A class to hold three-dimensional basis vectors for spacecraft baselines
class BaselineBasis(Basis):
def __init__(self):
Basis.__init__(self)
def setPositionVector(self,x):
self.x1 = x
def getPositionVector(self):
return self.x1
def setVelocityVector(self,v):
self.x2 = v
def getVelocityVector(self):
return self.x2
def setCrossTrackVector(self,c):
self.x3 = c
def getCrossTrackVector(self):
return self.x3
BASELINE_LOCATION = Component.Parameter('baselineLocation',
public_name = 'BASELINE_LOCATION',
default = 'all',
type=str,
mandatory=False,
doc = ('Location at which to compute baselines - "all" implies '+
'top, middle, bottom of master image, '+
'"top" implies near start of master image, '+
'"bottom" implies at bottom of master image, '+
'"middle" implies near middle of master image. '+
'To be used in case there is a large shift between images.')
)
class Baseline(Component):
family = 'baseline'
logging_name = 'isce.mroipac.baseline'
parameter_list = (BASELINE_LOCATION,)
# Calculate the Look Angle of the master frame
def calculateLookAngle(self):
lookVector = self.calculateLookVector()
return math.degrees(math.atan2(lookVector[1],lookVector[0]))
# Calculate the look vector of the master frame
def calculateLookVector(self):
try:
z = self.masterFrame.terrainHeight
except:
z = 0.0
cosl = ((self.height-z)*(2*self.radius + self.height + z) +
self.startingRange1*self.startingRange1)/(
2*self.startingRange1*(self.radius + self.height)
)
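# (Law of cosines in the triangle spacecraft - Earth centre - ground target:
#  (radius+z)^2 = (radius+height)^2 + range^2 - 2*range*(radius+height)*cos(look),
#  rearranged for cos(look), using (radius+height)^2 - (radius+z)^2
#  = (height-z)*(2*radius + height + z).)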
# print('Height: ', self.height)
# print('Radius: ', self.radius)
# print('Range: ', self.startingRange1)
# print('COSL: ', cosl)
sinl = math.sqrt(1 - cosl*cosl)
return [cosl,sinl]
# Calculate the scalar spacecraft velocity
def calculateScalarVelocity(self,orbit,time):
sv = orbit.interpolateOrbit(time, method='hermite')
v = sv.getVelocity()
normV = MM.norm(v)
return normV
# Given an orbit and a time, calculate an orthogonal basis for cross-track and velocity directions
# based on the spacecraft position
def calculateBasis(self,orbit,time):
sv = orbit.interpolateOrbit(time, method='hermite')
x1 = sv.getPosition()
v = sv.getVelocity()
r = MM.normalizeVector(x1) # Turn the position vector into a unit vector
v = MM.normalizeVector(v) # Turn the velocity vector into a unit vector
c = MM.crossProduct(r,v) # Calculate the vector perpendicular to the platform position and velocity, this is the c, or cross-track vector
c = MM.normalizeVector(c)
v = MM.crossProduct(c,r) # Calculate the "velocity" component that is perpendicular to the cross-track direction and position
basis = BaselineBasis()
basis.setPositionVector(r)
basis.setVelocityVector(v)
basis.setCrossTrackVector(c)
return basis
# Given two position vectors and a basis, calculate the offset between the two positions in this basis
def calculateBasisOffset(self,x1,x2,basis):
dx = [(x2[j] - x1[j]) for j in range(len(x1))] # Calculate the difference between the master and slave position vectors
z_offset = MM.dotProduct(dx,basis.getVelocityVector()) # Calculate the length of the projection of the difference in position and the "velocity" component
v_offset = MM.dotProduct(dx,basis.getPositionVector())
c_offset = MM.dotProduct(dx,basis.getCrossTrackVector())
return z_offset,v_offset,c_offset
# Calculate the baseline components between two frames
def baseline(self):
#TODO This could be further refactored into a method that calculates the baseline between
#TODO frames when given a master time and a slave time and a method that calls this method
#TODO multiple times to calculate the rate of baseline change over time.
for port in self.inputPorts:
port()
lookVector = self.calculateLookVector()
az_offset = []
vb = []
hb = []
csb = []
asb = []
s = [0.,0.,0.]
if self.baselineLocation.lower() == 'all':
print('Using entire span of image for estimating baselines')
masterTime = [self.masterFrame.getSensingStart(),self.masterFrame.getSensingMid(),self.masterFrame.getSensingStop()]
elif self.baselineLocation.lower() == 'middle':
print('Estimating baselines around center of master image')
masterTime = [self.masterFrame.getSensingMid() - datetime.timedelta(seconds=1.0), self.masterFrame.getSensingMid(), self.masterFrame.getSensingMid() + datetime.timedelta(seconds=1.0)]
elif self.baselineLocation.lower() == 'top':
print('Estimating baselines at top of master image')
masterTime = [self.masterFrame.getSensingStart(), self.masterFrame.getSensingStart() + datetime.timedelta(seconds=1.0), self.masterFrame.getSensingStart() + datetime.timedelta(seconds=2.0)]
elif self.baselineLocation.lower() == 'bottom':
print('Estimating baselines at bottom of master image')
masterTime = [self.masterFrame.getSensingStop() - datetime.timedelta(seconds=2.0), self.masterFrame.getSensingStop() - datetime.timedelta(seconds=1.0), self.masterFrame.getSensingStop()]
else:
raise Exception('Unknown baseline location: {0}'.format(self.baselineLocation))
slaveTime = [self.slaveFrame.getSensingMid() - datetime.timedelta(seconds=1.0), self.slaveFrame.getSensingMid(), self.slaveFrame.getSensingMid() + datetime.timedelta(seconds=1.0)]
# slaveTime = [self.slaveFrame.getSensingStart(),self.slaveFrame.getSensingMid(),self.slaveFrame.getSensingStop()]
for i in range(3):
# Calculate the Baseline at the start of the scene, mid-scene, and the end of the scene
# First, get the position and velocity at the start of the scene
self.logger.info("Sampling time %s" % i)
masterBasis = self.calculateBasis(self.masterOrbit,masterTime[i])
normV = self.calculateScalarVelocity(self.masterOrbit,masterTime[i])
# Calculate the distance moved since the last baseline point
if (i > 0):
deltaT = self._timeDeltaToSeconds(masterTime[i] - masterTime[0])
s[i] = s[i-1] + deltaT*normV
masterSV = self.masterOrbit.interpolateOrbit(masterTime[i], method='hermite')
slaveSV = self.slaveOrbit.interpolateOrbit(slaveTime[i], method='hermite')
x1 = masterSV.getPosition()
x2 = slaveSV.getPosition()
(z_offset,v_offset,c_offset) = self.calculateBasisOffset(x1,x2,masterBasis)
az_offset.append(z_offset) # Save the position offset
# Calculate a new start time
relativeSlaveTime = slaveTime[i] - datetime.timedelta(seconds=(z_offset/normV))
slaveSV = self.slaveOrbit.interpolateOrbit(relativeSlaveTime, method='hermite')
# Recalculate the offsets
x2 = slaveSV.getPosition()
(z_offset,v_offset,c_offset) = self.calculateBasisOffset(x1,x2,masterBasis)
vb.append(v_offset)
hb.append(c_offset)
csb.append(-hb[i]*lookVector[0] + vb[i]*lookVector[1]) # Multiply the horizontal and vertical baseline components by the look angle vector
asb.append(-hb[i]*lookVector[1] - vb[i]*lookVector[0])
#Calculating baseline
crossTrackBaselinePolynomialCoefficients = self.polynomialFit(s,hb)
verticalBaselinePolynomialCoefficients = self.polynomialFit(s,vb)
h_rate = crossTrackBaselinePolynomialCoefficients[1]
# Calculate the gross azimuth and range offsets
azb_avg = (az_offset[0] + az_offset[-1])/2.0
asb_avg = (asb[0] + asb[-1])/2.0
az_offset = (-azb_avg - h_rate*self.startingRange1*lookVector[1])/(self.azimuthPixelSize)
r_offset = (self.startingRange1 - self.startingRange2 - asb_avg)/(self.rangePixelSize)
# Populate class attributes
self.hBaselineTop = crossTrackBaselinePolynomialCoefficients[0]
self.hBaselineRate = crossTrackBaselinePolynomialCoefficients[1]
self.hBaselineAcc = crossTrackBaselinePolynomialCoefficients[2]
self.vBaselineTop = verticalBaselinePolynomialCoefficients[0]
self.vBaselineRate = verticalBaselinePolynomialCoefficients[1]
self.vBaselineAcc = verticalBaselinePolynomialCoefficients[2]
self.pBaselineTop = csb[0]
self.pBaselineBottom = csb[-1]
self.orbSlcAzimuthOffset = az_offset
self.orbSlcRangeOffset = r_offset
self.rangeOffset = self.startingRange1 - self.startingRange2
# Calculate a quadratic fit to the baseline polynomial
def polynomialFit(self,xRef,yRef):
size = len(xRef)
if not (len(xRef) == len(yRef)):
print("Error. Expecting input vectors of same length.")
raise Exception
if not (size == 3):
print("Error. Expecting input vectors of length 3.")
raise Exception
Y = [0]*size
A = [0]*size
M = [[0 for i in range(size) ] for j in range(size)]
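# M is the 3x3 Vandermonde matrix M[j][i] = xRef[j]**i; inverting it and applying
# it to Y solves M*A = Y, i.e. it yields coefficients A such that
# y = A[0] + A[1]*x + A[2]*x**2 passes through the three sample points.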
for j in range(size):
for i in range(size):
M[j][i] = math.pow(xRef[j],i)
Y[j] = yRef[j]
MInv = MM.invertMatrix(M)
for i in range(size):
for j in range(size):
A[i] += MInv[i][j]*Y[j]
return A
def setRangePixelSize(self,pixelSize):
self.rangePixelSize = pixelSize
return
def setAzimuthPixelSize(self,pixelSize):
self.azimuthPixelSize = pixelSize
return
def setHeight(self,var):
self.height = float(var)
return
def setRadius(self,radius):
self.radius = radius
return
def setMasterStartingRange(self,range):
self.startingRange1 = range
return
def setSlaveStartingRange(self,range):
self.startingRange2 = range
return
def getHBaselineTop(self):
return self.hBaselineTop
def getHBaselineRate(self):
return self.hBaselineRate
def getHBaselineAcc(self):
return self.hBaselineAcc
def getVBaselineTop(self):
return self.vBaselineTop
def getVBaselineRate(self):
return self.vBaselineRate
def getVBaselineAcc(self):
return self.vBaselineAcc
def getPBaselineTop(self):
return self.pBaselineTop
def getPBaselineBottom(self):
return self.pBaselineBottom
def getOrbSlcAzimuthOffset(self):
return self.orbSlcAzimuthOffset
def getOrbSlcRangeOffset(self):
return self.orbSlcRangeOffset
def getRangeOffset(self):
return self.rangeOffset
def getPhaseConst(self):
return self.phaseConst
def getLookAngle(self):
return self.lookAngle
def _timeDeltaToSeconds(self,td):
return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10**6) / 10**6
def addMasterFrame(self):
frame = self._inputPorts.getPort(name='masterFrame').getObject()
self.masterFrame = frame
self.startingRange1 = frame.getStartingRange()
prf = frame.getInstrument().getPulseRepetitionFrequency()
self.rangePixelSize = frame.getInstrument().getRangePixelSize()
self.masterOrbit = frame.getOrbit()
midSV = self.masterOrbit.interpolateOrbit(frame.getSensingMid(), method='hermite')
self.azimuthPixelSize = midSV.getScalarVelocity()/prf
try:
ellipsoid = frame._ellipsoid #UAVSAR frame creates ellipsoid with peg
self.radius = ellipsoid.pegRadCur
self.height = frame.platformHeight
except:
ellipsoid = frame.getInstrument().getPlatform().getPlanet().get_elp()
self.radius = ellipsoid.get_a()
self.height = midSV.calculateHeight(ellipsoid)
def addSlaveFrame(self):
frame = self._inputPorts.getPort(name='slaveFrame').getObject()
self.slaveFrame = frame
self.startingRange2 = frame.getStartingRange()
self.slaveOrbit = frame.getOrbit()
def __init__(self, name=''):
self.masterOrbit = None
self.slaveOrbit = None
self.masterFrame = None
self.slaveFrame = None
self.lookAngle = None
self.rangePixelSize = None
self.azimuthPixelSize = None
self.height = None
self.radius = None
self.startingRange1 = None
self.startingRange2 = None
self.hBaselineTop = None
self.hBaselineRate = None
self.hBaselineAcc = None
self.vBaselineTop = None
self.vBaselineRate = None
self.vBaselineAcc = None
self.pBaselineTop = None
self.pBaselineBottom = None
self.orbSlcAzimuthOffset = None
self.orbSlcRangeOffset = None
self.rangeOffset = None
self.phaseConst = -99999
super(Baseline, self).__init__(family=self.__class__.family, name=name)
self.logger = logging.getLogger('isce.mroipac.baseline')
self.createPorts()
# Satisfy the old Component
self.dictionaryOfOutputVariables = {}
self.dictionaryOfVariables = {}
self.descriptionOfVariables = {}
self.mandatoryVariables = []
self.optionalVariables = []
return None
def createPorts(self):
# Set input ports
# It looks like we really need two orbits, a time, range and azimuth pixel sizes
# the two starting ranges, a planet, and the two prfs
# These provide the orbits
# These provide the range and azimuth pixel sizes, starting ranges,
# satellite heights and times for the first lines
masterFramePort = Port(name='masterFrame',method=self.addMasterFrame)
slaveFramePort = Port(name='slaveFrame',method=self.addSlaveFrame)
self._inputPorts.add(masterFramePort)
self._inputPorts.add(slaveFramePort)
return None
def __str__(self):
retstr = "Initial Baseline estimates \n"
retstr += "Cross-track Baseline: %s\n"
retlst = (self.hBaselineTop,)
retstr += "Vertical Baseline: %s\n"
retlst += (self.vBaselineTop,)
retstr += "Perpendicular Baseline: %s\n"
retlst += (self.pBaselineTop,)
retstr += "Bulk Azimuth Offset: %s\n"
retlst += (self.orbSlcAzimuthOffset,)
retstr += "Bulk Range Offset: %s\n"
retlst += (self.orbSlcRangeOffset,)
return retstr % retlst
| 39.523697
| 202
| 0.655495
| 1,840
| 16,679
| 5.902174
| 0.244565
| 0.016206
| 0.020626
| 0.013812
| 0.160958
| 0.114088
| 0.068048
| 0.058748
| 0.031676
| 0.020258
| 0
| 0.010068
| 0.24372
| 16,679
| 421
| 203
| 39.617577
| 0.85088
| 0.225553
| 0
| 0.105263
| 0
| 0
| 0.07378
| 0.003269
| 0
| 0
| 0
| 0.002375
| 0
| 1
| 0.140351
| false
| 0
| 0.021053
| 0.059649
| 0.294737
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cf50080cfd2da35179773577dfa101c0a0615b
| 1,106
|
py
|
Python
|
src/modules/deuces/deck.py
|
Bot-Box/FiveCardStud
|
55e11d7a23becece33658075f922cf007909d058
|
[
"MIT"
] | null | null | null |
src/modules/deuces/deck.py
|
Bot-Box/FiveCardStud
|
55e11d7a23becece33658075f922cf007909d058
|
[
"MIT"
] | 1
|
2020-05-09T20:27:33.000Z
|
2020-05-09T20:27:33.000Z
|
src/modules/deuces/deck.py
|
Bot-Box/FiveCardStud
|
55e11d7a23becece33658075f922cf007909d058
|
[
"MIT"
] | null | null | null |
from random import shuffle as rshuffle
from .card import Card
class Deck:
"""
Class representing a deck. The first time we create one, we seed the static
deck with the list of unique card integers. Each object instantiated simply
makes a copy of this object and shuffles it.
"""
_FULL_DECK = []
def __init__(self):
self.shuffle()
def shuffle(self):
# get a fresh copy of the full deck and then shuffle it
self.cards = Deck.GetFullDeck()
rshuffle(self.cards)
def draw(self, n=1):
if n == 1:
return self.cards.pop(0)
cards = []
for i in range(n):
cards.append(self.draw())
return cards
def __str__(self):
return Card.print_pretty_cards(self.cards)
@staticmethod
def GetFullDeck():
if Deck._FULL_DECK:
return list(Deck._FULL_DECK)
# create the standard 52 card deck
for rank in Card.STR_RANKS:
for suit, val in Card.CHAR_SUIT_TO_INT_SUIT.items():
Deck._FULL_DECK.append(Card.new(rank + suit))
return list(Deck._FULL_DECK)
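# A minimal usage sketch (relies on the sibling card module's Card helpers used above):
#
#   deck = Deck()          # seeds the shared 52-card list once, then shuffles a copy
#   hand = deck.draw(5)    # returns a list of five card integers
#   print(deck)            # pretty-prints the remaining cards via Card.print_pretty_cards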
| 25.136364
| 79
| 0.605787
| 149
| 1,106
| 4.328859
| 0.449664
| 0.062016
| 0.074419
| 0.055814
| 0.068217
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006553
| 0.310127
| 1,106
| 43
| 80
| 25.72093
| 0.838794
| 0.221519
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.076923
| 0.038462
| 0.538462
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cfb448c497219965f4d51af8838d801a58ed41
| 21,000
|
py
|
Python
|
openidc_client/__init__.py
|
puiterwijk/python-openidc-client
|
cd8d91c0503124305727f38a0f9fe93bb472209c
|
[
"MIT"
] | 6
|
2017-03-16T13:32:11.000Z
|
2021-06-21T19:12:21.000Z
|
openidc_client/__init__.py
|
puiterwijk/python-openidc-client
|
cd8d91c0503124305727f38a0f9fe93bb472209c
|
[
"MIT"
] | 5
|
2017-03-23T19:50:36.000Z
|
2022-01-25T04:45:27.000Z
|
openidc_client/__init__.py
|
puiterwijk/python-openidc-client
|
cd8d91c0503124305727f38a0f9fe93bb472209c
|
[
"MIT"
] | 4
|
2017-03-21T17:34:28.000Z
|
2022-01-24T06:16:19.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016, 2017 Red Hat, Inc.
# Red Hat Author: Patrick Uiterwijk <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Client for applications relying on OpenID Connect for authentication."""
from __future__ import print_function
from copy import copy
import json
import logging
from threading import Lock
import time
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import socket
import os
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from uuid import uuid4 as uuidgen
import webbrowser
from wsgiref import simple_server
import requests
import sys
from openidc_client import release
# The ports that we will try to use for our webserver
WEB_PORTS = [12345, 23456]
class OpenIDCClient(object):
# Internal implementation of tokens:
# Every app id has its own token cache
# The token cache is a json serialized dict
# This dict contains uuid: token pairs
# Every "token" object is a json dict with the following keys:
# idp: The URL of the idp that issued the token
# sub: The subject that owns the token
# access_token: Token value
# token_type: Token type. Currently supported: "Bearer"
# expires_at: Token expiration UTC time. NOTE: Even if the expires_at
# indicates the token should still be valid, it may have been revoked by
# the user! Also, even if it has expired, we might still be able to
# refresh the token.
# refresh_token: The token we can use to refresh the access token
# scopes: A list of scopes that we had requested with the token
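# A purely illustrative cache entry (hypothetical values, not from a real IdP):
#   {"9f1c2d...": {"idp": "https://id.example.com",
#                  "sub": "user123",
#                  "access_token": "<opaque string>",
#                  "token_type": "Bearer",
#                  "expires_at": 1700000000,
#                  "refresh_token": "<opaque string>",
#                  "scopes": ["openid", "profile"]}}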
def __init__(self, app_identifier, id_provider, id_provider_mapping,
client_id, client_secret=None, use_post=False, useragent=None,
cachedir=None, printfd=sys.stdout):
"""Client for interacting with web services relying on OpenID Connect.
:param app_identifier: Identifier for storage of retrieved tokens
:param id_provider: URL of the identity provider to get tokens from
:param id_provider_mapping: Mapping with URLs to use for specific
endpoints on the IdP.
:kwarg use_post: Whether to use POST submission of client secrets
rather than Authorization header
:kwarg client_id: The Client Identifier used to request credentials
:kwarg client_secret: The client "secret" that goes with the client_id.
May be None if your IdP does not require you to use a secret.
:kwarg useragent: Useragent string to use. If not provided, defaults to
"python-openidc-client/VERSION"
:kwarg cachedir: The directory in which to store the token caches. Will
be put through expanduser. Default is ~/.openidc. If this does not
exist and we are unable to create it, an OSError will be raised.
:kwarg printfd: The file object to print token instructions to.
"""
self.logger = logging.getLogger(__name__)
self.debug = self.logger.debug
self.app_id = app_identifier
self.use_post = use_post
self.idp = id_provider
self.idp_mapping = id_provider_mapping
self.client_id = client_id
self.client_secret = client_secret
self.useragent = useragent or 'python-openid-client/%s' % \
release.VERSION
self.cachedir = os.path.expanduser(cachedir or '~/.openidc')
self.last_returned_uuid = None
self.problem_reported = False
self.token_to_try = None
self._retrieved_code = None
# TODO: Make cache_lock a filesystem lock so we also lock across
# multiple invocations
self._cache_lock = Lock()
with self._cache_lock:
self.__refresh_cache()
self._valid_cache = []
self._printfd = printfd
def get_token(self, scopes, new_token=True):
"""Function to retrieve tokens with specific scopes.
This function will block until a token is retrieved if requested.
It is always safe to call this though, since if we already have a token
with the current app_identifier that has the required scopes, we will
return it.
This function will return a bearer token or None.
Note that the bearer token might have been revoked by the user or
expired.
In that case, you will want to call report_token_issue() to try to
renew the token or delete the token.
:kwarg scopes: A list of scopes required for the current client.
:kwarg new_token: If True, we will actively request the user to get a
new token with the current scopeset if we do not already have one.
:rtype: string or None
:returns: String bearer token if possible or None
"""
if not isinstance(scopes, list):
raise ValueError('Scopes must be a list')
token = self._get_token_with_scopes(scopes)
if token:
# If we had a valid token, use that
self.last_returned_uuid = token[0]
self.problem_reported = False
return token[1]['access_token']
elif not new_token:
return None
# We did not have a valid token, now comes the hard part...
uuid = self._get_new_token(scopes)
if uuid:
self.last_returned_uuid = uuid
self.problem_reported = False
return self._cache[uuid]['access_token']
def report_token_issue(self):
"""Report an error with the last token that was returned.
This will attempt to renew the token that was last returned.
If that worked, we will return the new access token.
If it did not work, we will return None and remove this token from the
cache.
If you get an indication from your application that the token you sent
was invalid, you should call this function.
You should explicitly NOT call this function if the token was valid but
your request failed due to a server error or because the account or
token was lacking specific permissions.
"""
if not self.last_returned_uuid:
raise Exception('Cannot report issue before requesting token')
if self.problem_reported:
# An issue was already reported for this token. Let's just remove it.
self._delete_token(self.last_returned_uuid)
return None
refresh_result = self._refresh_token(self.last_returned_uuid)
if not refresh_result:
self._delete_token(self.last_returned_uuid)
return None
else:
self.problem_reported = True
return self._cache[self.last_returned_uuid]['access_token']
def send_request(self, *args, **kwargs):
"""Make an python-requests POST request.
Allarguments and keyword arguments are like the arguments to requests,
except for `scopes`, `new_token` and `auto_refresh` keyword arguments.
`scopes` is required.
:kwarg scopes: Scopes required for this call. If a token with these
scopes is not present, a new one will be requested unless new_token is
False.
:kwarg new_token: If True, we will actively request the user to get a
new token with the current scopeset if we do not already have one.
:kwarg auto_refresh: If False, will not try to automatically report
token issues on 401. This helps with broken apps that may send a
401 return code in incorrect cases.
:kwarg http_method: The HTTP method to use, defaults to POST.
"""
ckwargs = copy(kwargs)
scopes = ckwargs.pop('scopes')
new_token = ckwargs.pop('new_token', True)
auto_refresh = ckwargs.pop('auto_refresh', True)
method = ckwargs.pop('http_method', 'POST')
is_retry = False
if self.token_to_try:
is_retry = True
token = self.token_to_try
self.token_to_try = None
else:
token = self.get_token(scopes, new_token=new_token)
if not token:
return None
if self.use_post:
if 'json' in ckwargs:
raise ValueError('Cannot provide json in a post call')
if method not in ['POST']:
raise ValueError('Cannot use POST tokens in %s method' %
method)
if 'data' not in ckwargs:
ckwargs['data'] = {}
ckwargs['data']['access_token'] = token
else:
if 'headers' not in ckwargs:
ckwargs['headers'] = {}
ckwargs['headers']['Authorization'] = 'Bearer %s' % token
resp = requests.request(method, *args, **ckwargs)
if resp.status_code == 401 and not is_retry:
if not auto_refresh:
return resp
self.token_to_try = self.report_token_issue()
if not self.token_to_try:
return resp
return self.send_request(*args, **kwargs)
elif resp.status_code == 401:
# We got a 401 and this is a retry. Report error
self.report_token_issue()
return resp
else:
return resp
@property
def _cachefile(self):
"""Property to get the cache file name for the current client.
This ensures that whenever this file is touched, the cache lock is held.
"""
assert self._cache_lock.locked()
return os.path.join(self.cachedir, 'oidc_%s.json' % self.app_id)
def __refresh_cache(self):
"""Refreshes the self._cache from the cache on disk.
Requires cache_lock to be held by caller."""
assert self._cache_lock.locked()
self.debug('Refreshing cache')
if not os.path.isdir(self.cachedir):
self.debug('Creating directory')
os.makedirs(self.cachedir)
if not os.path.exists(self._cachefile):
self.debug('Creating file')
with open(self._cachefile, 'w') as f:
f.write(json.dumps({}))
with open(self._cachefile, 'r') as f:
self._cache = json.loads(f.read())
self.debug('Loaded %i tokens', len(self._cache))
def _refresh_cache(self):
"""Refreshes the self._cache from the cache on disk.
cache_lock may not be held by anyone."""
with self._cache_lock:
self.__refresh_cache()
def __write_cache(self):
"""Wirtes self._cache to cache on disk.
Requires cache_lock to be held by caller."""
assert self._cache_lock.locked()
self.debug('Writing cache with %i tokens', len(self._cache))
with open(self._cachefile, 'w') as f:
f.write(json.dumps(self._cache))
def _add_token(self, token):
"""Adds a token to the cache and writes cache to disk.
cache_lock may not be held by anyone.
:param token: Dict of the token to be added to the cache
"""
uuid = uuidgen().hex
self.debug('Adding token %s to cache', uuid)
with self._cache_lock:
self.__refresh_cache()
self._cache[uuid] = token
self.__write_cache()
return uuid
def _update_token(self, uuid, toupdate):
"""Updates a token in the cache.
cache_lock may not be held by anyone.
:param token: UUID of the token to be updated
:param toupdate: Dict indicating which fields need to be updated
"""
self.debug('Updating token %s in cache, fields %s',
uuid, toupdate.keys())
with self._cache_lock:
self.__refresh_cache()
if uuid not in self._cache:
return None
self._cache[uuid].update(toupdate)
self.__write_cache()
return uuid
def _delete_token(self, uuid):
"""Removes a token from the cache and writes cache to disk.
cache_lock may not be held by anyone.
:param uuid: UUID of the token to be removed from cache
"""
self.debug('Removing token %s from cache', uuid)
with self._cache_lock:
self.__refresh_cache()
if uuid in self._cache:
self.debug('Removing token')
del self._cache[uuid]
self.__write_cache()
else:
self.debug('Token was already gone')
def _get_token_with_scopes(self, scopes):
"""Searches the cache for any tokens that have the requested scopes.
It will prefer to return tokens whose expires_at is still in the
future, but if no such tokens exist it will return the possibly
expired token: it might be refreshable.
:param scopes: List of scopes that need to be in the returned token
:rtype: (string, dict) or None
:returns: Token UUID and contents or None if no applicable tokens were
found
"""
possible_token = None
self.debug('Trying to get token with scopes %s', scopes)
for uuid in self._cache:
self.debug('Checking %s', uuid)
token = self._cache[uuid]
if token['idp'] != self.idp:
self.debug('Incorrect idp')
continue
if not set(scopes).issubset(set(token['scopes'])):
self.debug('Missing scope: %s not subset of %s',
set(scopes),
set(token['scopes']))
continue
if token['expires_at'] > time.time():
# This is a token that's supposed to still be valid, prefer it
# over any others we have
self.debug('Not yet expired, returning')
return uuid, token
# This is a token that may or may not still be valid
self.debug('Possible')
possible_token = (uuid, token)
if possible_token:
self.debug('Returning possible token')
return possible_token
def _idp_url(self, method):
"""Returns the IdP URL for the requested method.
:param method: The method name in the IdP mapping dict.
:rtype: string
:returns: The IdP URL
"""
if method in self.idp_mapping:
return self.idp + self.idp_mapping[method]
else:
raise ValueError('Idp Mapping did not include path for %s'
% method)
def _refresh_token(self, uuid):
"""Tries to refresh a token and put the refreshed token in self._cache
The caller is responsible for either removing the token if it could not
be refreshed or saving the cache if renewal was successful.
:param uuid: The UUID of the cached token to attempt to refresh.
:rtype: bool
:returns: True if the token was successfully refreshed, False otherwise
"""
oldtoken = self._cache[uuid]
self.debug('Refreshing token %s', uuid)
data = {'client_id': self.client_id,
'grant_type': 'refresh_token',
'refresh_token': oldtoken['refresh_token']}
if self.client_secret:
data['client_secret'] = self.client_secret
resp = requests.request(
'POST',
self._idp_url('Token'),
data=data)
resp.raise_for_status()
resp = resp.json()
if 'error' in resp:
self.debug('Unable to refresh, error: %s', resp['error'])
return False
self._update_token(
uuid,
{'access_token': resp['access_token'],
'token_type': resp['token_type'],
'refresh_token': resp['refresh_token'],
'expires_at': time.time() + resp['expires_in']})
self.debug('Refreshed until %s', self._cache[uuid]['expires_at'])
return True
def _get_server(self, app):
"""This function returns a SimpleServer with an available WEB_PORT."""
for port in WEB_PORTS:
try:
server = simple_server.make_server('0.0.0.0', port, app)
return server
except socket.error:
# This port did not work. Switch to next one
continue
def _get_new_token(self, scopes):
"""This function kicks off some magic.
We will start a new webserver on one of the WEB_PORTS, and then either
show the user a URL, or if possible, kick off their browser.
This URL will be the Authorization endpoint of the IdP with a request
for our client_id to get a new token with the specified scopes.
The webserver will then need to catch the return with either an
Authorization Code (that we will exchange for an access token) or the
cancellation message.
This function will store the new token in the local cache, add it to
the valid cache, and then return the UUID.
If the user cancelled (or we got another error), we will return None.
"""
def _token_app(environ, start_response):
query = environ['QUERY_STRING']
split = query.split('&')
kv = dict([v.split('=', 1) for v in split])
if 'error' in kv:
self.debug('Error code returned: %s (%s)',
kv['error'], kv.get('error_description'))
self._retrieved_code = False
else:
self._retrieved_code = kv['code']
# Just return a message
start_response('200 OK', [('Content-Type', 'text/plain')])
return [u'You can close this window and return to the CLI'.encode('ascii')]
self._retrieved_code = None
server = self._get_server(_token_app)
if not server:
raise Exception('We were unable to instantiate a webserver')
return_uri = 'http://localhost:%i/' % server.socket.getsockname()[1]
rquery = {}
rquery['scope'] = ' '.join(scopes)
rquery['response_type'] = 'code'
rquery['client_id'] = self.client_id
rquery['redirect_uri'] = return_uri
rquery['response_mode'] = 'query'
query = urlencode(rquery)
authz_url = '%s?%s' % (self._idp_url('Authorization'), query)
print('Please visit %s to grant authorization' % authz_url,
file=self._printfd)
webbrowser.open(authz_url)
server.handle_request()
server.server_close()
assert self._retrieved_code is not None
if self._retrieved_code is False:
# The user cancelled the request
self._retrieved_code = None
self.debug('User cancelled')
return None
self.debug('We got an authorization code!')
data = {'client_id': self.client_id,
'grant_type': 'authorization_code',
'redirect_uri': return_uri,
'code': self._retrieved_code}
if self.client_secret:
data['client_secret'] = self.client_secret
resp = requests.request(
'POST',
self._idp_url('Token'),
data=data)
resp.raise_for_status()
self._retrieved_code = None
resp = resp.json()
if 'error' in resp:
self.debug('Error exchanging authorization code: %s',
resp['error'])
return None
token = {'access_token': resp['access_token'],
'refresh_token': resp['refresh_token'],
'expires_at': time.time() + int(resp['expires_in']),
'idp': self.idp,
'token_type': resp['token_type'],
'scopes': scopes}
# AND WE ARE DONE! \o/
return self._add_token(token)
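# Usage sketch (not part of the library above): one way a consumer might drive
# OpenIDCClient. The IdP URL, endpoint mapping, client_id, scopes and API URL
# are hypothetical placeholders; substitute the values of your own provider.
client = OpenIDCClient('example-app', 'https://idp.example.com/openidc/',
{'Token': 'Token', 'Authorization': 'Authorization'}, 'example-client')
# get_token() blocks on the browser flow the first time, then reuses the cache.
token = client.get_token(['openid', 'example-scope'])
# send_request() wraps python-requests and injects the bearer token for us.
resp = client.send_request('https://service.example.com/api',
scopes=['openid', 'example-scope'])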
| 40.384615
| 87
| 0.617
| 2,749
| 21,000
| 4.588214
| 0.191342
| 0.019979
| 0.01213
| 0.012685
| 0.172362
| 0.134464
| 0.120035
| 0.115754
| 0.10727
| 0.075081
| 0
| 0.003163
| 0.307381
| 21,000
| 519
| 88
| 40.462428
| 0.864008
| 0.385571
| 0
| 0.27931
| 0
| 0
| 0.135661
| 0.001921
| 0
| 0
| 0
| 0.001927
| 0.013793
| 1
| 0.058621
| false
| 0
| 0.068966
| 0
| 0.224138
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9cfea74cbe1fffe3e3d0849bdd6679785142bf0
| 7,159
|
py
|
Python
|
eoxserver/services/ows/wps/v10/encoders/parameters.py
|
constantinius/eoxserver_combined
|
68f261133fed65a4e8a6ddba82b0d2845171e4bf
|
[
"OML"
] | 1
|
2017-11-21T22:23:30.000Z
|
2017-11-21T22:23:30.000Z
|
eoxserver/services/ows/wps/v10/encoders/parameters.py
|
constantinius/eoxserver_combined
|
68f261133fed65a4e8a6ddba82b0d2845171e4bf
|
[
"OML"
] | null | null | null |
eoxserver/services/ows/wps/v10/encoders/parameters.py
|
constantinius/eoxserver_combined
|
68f261133fed65a4e8a6ddba82b0d2845171e4bf
|
[
"OML"
] | null | null | null |
#-------------------------------------------------------------------------------
#
# WPS 1.0 parameters' XML encoders
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
# Martin Paces <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from eoxserver.services.ows.wps.parameters import (
LiteralData, ComplexData, BoundingBoxData,
AllowedAny, AllowedEnum, AllowedRange, AllowedRangeCollection,
AllowedByReference,
)
from eoxserver.services.ows.wps.v10.util import (
OWS, WPS, NIL, ns_ows,
)
#-------------------------------------------------------------------------------
def encode_input_descr(prm):
""" Encode process description input."""
elem = NIL("Input", *_encode_param_common(prm))
elem.attrib["minOccurs"] = ("1", "0")[bool(prm.is_optional)]
elem.attrib["maxOccurs"] = "1"
if isinstance(prm, LiteralData):
elem.append(_encode_literal(prm, True))
elif isinstance(prm, ComplexData):
elem.append(_encode_complex(prm, True))
elif isinstance(prm, BoundingBoxData):
elem.append(_encode_bbox(prm, True))
return elem
def encode_output_descr(prm):
""" Encode process description output."""
elem = NIL("Output", *_encode_param_common(prm))
if isinstance(prm, LiteralData):
elem.append(_encode_literal(prm, False))
elif isinstance(prm, ComplexData):
elem.append(_encode_complex(prm, False))
elif isinstance(prm, BoundingBoxData):
elem.append(_encode_bbox(prm, False))
return elem
def encode_input_exec(prm):
""" Encode common part of the execure response data input."""
return WPS("Input", *_encode_param_common(prm, False))
def encode_output_exec(prm):
""" Encode common part of the execure response data output."""
return WPS("Output", *_encode_param_common(prm))
def encode_output_def(outdef):
""" Encode the execure response output definition."""
attrib = {}
if outdef.uom is not None:
attrib['uom'] = outdef.uom
if outdef.crs is not None:
attrib['crs'] = outdef.crs
if outdef.mime_type is not None:
attrib['mimeType'] = outdef.mime_type
if outdef.encoding is not None:
attrib['encoding'] = outdef.encoding
if outdef.schema is not None:
attrib['schema'] = outdef.schema
if outdef.as_reference is not None:
attrib['asReference'] = 'true' if outdef.as_reference else 'false'
return WPS("Output", *_encode_param_common(outdef, False), **attrib)
def _encode_param_common(prm, title_required=True):
""" Encode common sub-elements of all XML parameters."""
elist = [OWS("Identifier", prm.identifier)]
if prm.title or title_required:
elist.append(OWS("Title", prm.title or prm.identifier))
if prm.abstract:
elist.append(OWS("Abstract", prm.abstract))
return elist
#-------------------------------------------------------------------------------
def _encode_literal(prm, is_input):
dtype = prm.dtype
elem = NIL("LiteralData" if is_input else "LiteralOutput")
elem.append(OWS("DataType", dtype.name, **{
ns_ows("reference"): "http://www.w3.org/TR/xmlschema-2/#%s"%dtype.name,
}))
if prm.uoms:
elem.append(NIL("UOMs",
NIL("Default", OWS("UOM", prm.uoms[0])),
NIL("Supported", *[OWS("UOM", u) for u in prm.uoms])
))
if is_input:
elem.append(_encode_allowed_value(prm.allowed_values))
if prm.default is not None:
elem.append(NIL("DefaultValue", str(prm.default)))
return elem
def _encode_allowed_value(avobj):
enum, ranges, elist = None, [], []
if isinstance(avobj, AllowedAny):
return OWS("AnyValue")
elif isinstance(avobj, AllowedByReference):
return WPS("ValuesReference", **{
ns_ows("reference"): avobj.url,
"valuesForm": avobj.url,
})
elif isinstance(avobj, AllowedEnum):
enum = avobj
elif isinstance(avobj, AllowedRange):
ranges = [avobj]
elif isinstance(avobj, AllowedRangeCollection):
enum, ranges = avobj.enum, avobj.ranges
else:
raise TypeError("Invalid allowed value object! OBJ=%r"%avobj)
dtype = avobj.dtype
ddtype = dtype.get_diff_dtype()
if enum is not None:
elist.extend(OWS("Value", dtype.encode(v)) for v in enum.values)
for range_ in ranges:
attr, elms = {}, []
if range_.closure != 'closed':
attr = {ns_ows("rangeClosure"): range_.closure}
if range_.minval is not None:
elms.append(OWS("MinimumValue", dtype.encode(range_.minval)))
if range_.maxval is not None:
elms.append(OWS("MaximumValue", dtype.encode(range_.maxval)))
if range_.spacing is not None:
elms.append(OWS("Spacing", ddtype.encode(range_.spacing)))
elist.append(OWS("Range", *elms, **attr))
return OWS("AllowedValues", *elist)
#-------------------------------------------------------------------------------
def _encode_complex(prm, is_input):
return NIL("ComplexData" if is_input else "ComplexOutput",
NIL("Default", _encode_format(prm.default_format)),
NIL("Supported", *[_encode_format(f) for f in prm.formats.itervalues()])
)
def _encode_format(frmt):
elem = NIL("Format", NIL("MimeType", frmt.mime_type))
if frmt.encoding is not None:
elem.append(NIL("Encoding", frmt.encoding))
if frmt.schema is not None:
elem.append(NIL("Schema", frmt.schema))
return elem
#-------------------------------------------------------------------------------
def _encode_bbox(prm, is_input):
return NIL("BoundingBoxData" if is_input else "BoundingBoxOutput",
NIL("Default", NIL("CRS", prm.encode_crs(prm.default_crs))),
NIL("Supported", *[NIL("CRS", prm.encode_crs(crs)) for crs in prm.crss])
)
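# Side note (illustrative only): encode_input_descr() above selects the
# "minOccurs" value with the tuple-indexing idiom ("1", "0")[bool(...)].
# A standalone sketch of that idiom with a made-up flag:
is_optional = True
min_occurs = ("1", "0")[bool(is_optional)]  # "0" when optional, "1" otherwise
assert min_occurs == "0"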
| 38.283422
| 80
| 0.621875
| 862
| 7,159
| 5.056845
| 0.271462
| 0.014912
| 0.026841
| 0.020647
| 0.206699
| 0.140399
| 0.095435
| 0.095435
| 0.095435
| 0.021565
| 0
| 0.002415
| 0.19011
| 7,159
| 186
| 81
| 38.489247
| 0.749396
| 0.289845
| 0
| 0.083333
| 0
| 0
| 0.101117
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091667
| false
| 0
| 0.016667
| 0.016667
| 0.216667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d317f8ac0c3d87ca7347265d7a9836b41ed098
| 2,481
|
py
|
Python
|
gci-vci-serverless/src/helpers/vp_saves_helpers.py
|
ClinGen/gene-and-variant-curation-tools
|
30f21d8f03d8b5c180c1ce3cb8401b5abc660080
|
[
"MIT"
] | 1
|
2021-09-17T20:39:07.000Z
|
2021-09-17T20:39:07.000Z
|
gci-vci-serverless/src/helpers/vp_saves_helpers.py
|
ClinGen/gene-and-variant-curation-tools
|
30f21d8f03d8b5c180c1ce3cb8401b5abc660080
|
[
"MIT"
] | 133
|
2021-08-29T17:24:26.000Z
|
2022-03-25T17:24:31.000Z
|
gci-vci-serverless/src/helpers/vp_saves_helpers.py
|
ClinGen/gene-and-variant-curation-tools
|
30f21d8f03d8b5c180c1ce3cb8401b5abc660080
|
[
"MIT"
] | null | null | null |
import datetime
import uuid
import simplejson as json
from src.db.s3_client import Client as S3Client
from decimal import Decimal
def get_from_archive(archive_key):
''' Download a VP Save from S3.
:param str archive_key: The vp_save data's location (S3 bucket and file path). This value is required.
'''
if archive_key is None or '/' not in archive_key:
raise ValueError()
bucket, key = archive_key.split('/', 1)
s3_client = S3Client()
try:
archive_object = json.loads(s3_client.get_object(bucket, key)['Body'].read(),parse_float=Decimal)
except Exception as e:
print('ERROR: Error downloading ' + key + ' from ' + bucket + ' bucket. ERROR\n%s' %e)
raise
return archive_object
def build(vp_save=None):
''' Builds and returns a valid vp_save object.
Builds a new vp_save object by creating default values for
required fields and combines any of the given attributes.
'''
# Use None as the default to avoid sharing one mutable dict across calls.
if vp_save is None:
    vp_save = {}
vp_save['PK'] = str(uuid.uuid4())
# Set timestamps (for new data)
now = datetime.datetime.now().isoformat()
vp_save['date_created'] = now
vp_save['last_modified'] = now
vp_save['item_type'] = 'vp_save'
return vp_save
def archive(bucket, vp_save_pk, save_data):
''' Archives a vp save data to S3.
Uploads the save data object as a JSON file to S3. The location of the archive
depends on the bucket and the primary key of the save data. If the upload fails,
an exception is raised. If successful, returns the archive location.
:param str bucket: The name of the S3 bucket for the archive. This value is required.
:param str vp_save_pk: The vp_save PK to use as the name of the JSON file. This value is required.
:param obj save_data: The save data object to archive. This value is required.
'''
if bucket is None or len(bucket) <= 0:
raise ValueError()
if vp_save_pk is None or len(vp_save_pk) <= 0:
raise ValueError()
if not save_data:
raise ValueError()
archive_file = __archive_key(save_data) + '/' + vp_save_pk + '.json'
# Upload curation data to S3 archive bucket.
s3_client = S3Client()
try:
s3_client.put_object(
bytes(json.dumps(save_data).encode('UTF-8')),
bucket,
archive_file
)
except Exception as e:
print('ERROR: Error uploading ' + archive_file + ' to ' + bucket + ' bucket. ERROR\n%s' %e)
raise
archive_key_comps = [bucket, archive_file]
return '/'.join(archive_key_comps)
def __archive_key(save_data):
return save_data['PK']
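# Illustrative sketch (not part of the module): archive() returns keys of the
# form "<bucket>/<save PK>/<vp_save_pk>.json", and get_from_archive() splits
# on the first "/" to recover bucket and object key. Values below are made up.
example_key = 'my-archive-bucket/1234-abcd/5678-efgh.json'
bucket, key = example_key.split('/', 1)
assert bucket == 'my-archive-bucket'
assert key == '1234-abcd/5678-efgh.json'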
| 27.263736
| 104
| 0.699315
| 389
| 2,481
| 4.300771
| 0.290488
| 0.064555
| 0.033473
| 0.045427
| 0.1315
| 0.069337
| 0.069337
| 0
| 0
| 0
| 0
| 0.009615
| 0.203547
| 2,481
| 90
| 105
| 27.566667
| 0.837045
| 0.359532
| 0
| 0.266667
| 0
| 0
| 0.101816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.111111
| 0.022222
| 0.288889
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d321dead6bc8e55098581c550215a3e969a2f1
| 464
|
py
|
Python
|
docs/source/auto_examples/plot_usage.py
|
ruhugu/brokenaxes
|
1cfb301c854b3336aeb4dd9a2c329310534dfb21
|
[
"MIT"
] | 362
|
2017-05-01T10:20:56.000Z
|
2022-03-29T21:39:09.000Z
|
docs/source/auto_examples/plot_usage.py
|
ruhugu/brokenaxes
|
1cfb301c854b3336aeb4dd9a2c329310534dfb21
|
[
"MIT"
] | 73
|
2017-04-20T18:54:39.000Z
|
2021-12-02T08:04:21.000Z
|
docs/source/auto_examples/plot_usage.py
|
ruhugu/brokenaxes
|
1cfb301c854b3336aeb4dd9a2c329310534dfb21
|
[
"MIT"
] | 52
|
2017-05-04T13:03:25.000Z
|
2022-03-29T21:39:20.000Z
|
"""
Basic usage
===========
This example presents the basic usage of brokenaxes
"""
import matplotlib.pyplot as plt
from brokenaxes import brokenaxes
import numpy as np
fig = plt.figure(figsize=(5,2))
bax = brokenaxes(xlims=((0, .1), (.4, .7)), ylims=((-1, .7), (.79, 1)), hspace=.05)
x = np.linspace(0, 1, 100)
bax.plot(x, np.sin(10 * x), label='sin')
bax.plot(x, np.cos(10 * x), label='cos')
bax.legend(loc=3)
bax.set_xlabel('time')
bax.set_ylabel('value')
| 21.090909
| 83
| 0.644397
| 79
| 464
| 3.759494
| 0.594937
| 0.161616
| 0.053872
| 0.06734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057214
| 0.133621
| 464
| 21
| 84
| 22.095238
| 0.681592
| 0.163793
| 0
| 0
| 0
| 0
| 0.039578
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d368d362ab070d71b3363fe0fb20728ec9660d
| 5,985
|
py
|
Python
|
src/entity/002_createRdf.py
|
toyo-bunko/paper_app
|
f988e05cf83711d98c5ed735c0fd74fcf11e0f05
|
[
"Apache-2.0"
] | 1
|
2021-02-28T15:38:37.000Z
|
2021-02-28T15:38:37.000Z
|
src/entity/002_createRdf.py
|
toyo-bunko/paper_app
|
f988e05cf83711d98c5ed735c0fd74fcf11e0f05
|
[
"Apache-2.0"
] | null | null | null |
src/entity/002_createRdf.py
|
toyo-bunko/paper_app
|
f988e05cf83711d98c5ed735c0fd74fcf11e0f05
|
[
"Apache-2.0"
] | null | null | null |
import shutil
import os
import json
import glob
import yaml
import sys
import urllib
import ssl
import csv
import time
import requests
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
all = Graph()
with open("data/dict.json") as f:
ln_map = json.load(f)
st_path = "../data/index.json"
with open(st_path) as f:
result = json.load(f)
uris = []
for obj in result:
fields = ["spatial", "agential"]
for field in fields:
values = obj[field]
for value in values:
uri = "chname:"+value
if field == "spatial":
uri = "place:"+value
if uri not in uris:
uris.append(uri)
for uri in uris:
print(uri)
tmp = uri.split(":")
prefix = tmp[0]
suffix = tmp[1]
ln = suffix
ln_org = ""
if ln in ln_map:
ln_org = ln
ln = ln_map[ln]
if len(ln) > 20:
continue
# ln = obj["uri"].split(":")[1]
'''
wiki_path = "data/wikidata/"+ln+".json"
wiki = {}
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
# sameAs
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(wiki_url))
all.add(stmt)
obj = wiki["entities"][wiki_url.split("/")[-1]]
# description
if "descriptions" in obj and "ja" in obj["descriptions"]:
stmt = (subject, URIRef("http://schema.org/description"), Literal(obj["descriptions"]["ja"]["value"], lang="ja"))
all.add(stmt)
# label
if "labels" in obj and "ja" in obj["labels"]:
stmt = (subject, RDFS.label, Literal(obj["labels"]["ja"]["value"]))
all.add(stmt)
ln = wiki_url.split("/")[-1]
'''
db_path = "data/dbpedia_ja/"+ln+".json"
wiki_path = "data/wikidata/"+ln+".json"
db = {}
wiki = {}
if os.path.exists(db_path):
with open(db_path) as f:
db = json.load(f)
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
db_uri = "http://ja.dbpedia.org/resource/"+ln
if db_uri not in db:
print("not" , db_uri)
continue
# ######
subject = URIRef("https://shibusawa-dlab.github.io/lab1/api/"+prefix+"/"+ln)
if prefix == "chname":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Agent"))
all.add(stmt)
elif prefix == "time":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Time"))
all.add(stmt)
elif prefix == "place":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Place"))
all.add(stmt)
elif prefix == "event":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Event"))
all.add(stmt)
elif prefix == "org":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Organization"))
all.add(stmt)
elif prefix == "keyword":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Keyword"))
all.add(stmt)
elif prefix == "type":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Type"))
all.add(stmt)
# ######
obj = db[db_uri]
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(db_uri))
all.add(stmt)
if "http://dbpedia.org/ontology/thumbnail" in obj:
stmt = (subject, URIRef("http://schema.org/image"), URIRef(obj["http://dbpedia.org/ontology/thumbnail"][0]["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#label" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#label"]
for label in labels:
if label["lang"] == "ja":
stmt = (subject, RDFS.label, Literal(label["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#comment" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#comment"]
for label in labels:
stmt = (subject, URIRef("http://schema.org/description"), Literal(label["value"], lang=label["lang"]))
all.add(stmt)
if "http://www.w3.org/2002/07/owl#sameAs" in obj:
labels = obj["http://www.w3.org/2002/07/owl#sameAs"]
for label in labels:
value = label["value"]
if "http://dbpedia.org" in value or "http://ja.dbpedia.org" in value or "www.wikidata.org" in value:
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(value))
all.add(stmt)
# Location information
'''
if "point" in obj and prefix == "place":
value = obj["point"]["value"].split(" ")
# addGeo function
geoUri = addGeo({
"lat" : float(value[0]),
"long": float(value[1])
})
stmt = (subject, URIRef("http://schema.org/geo"), geoUri)
if suffix not in places:
places[suffix] = {
"lat" : float(value[0]),
"long": float(value[1])
}
all.add(stmt)
'''
# Name before normalization
if ln_org != "" and ln != ln_org:
stmt = (subject, URIRef("http://schema.org/name"), Literal(ln_org))
all.add(stmt)
path = "data/all.json"
all.serialize(destination=path, format='json-ld')
all.serialize(destination=path.replace(".json", ".rdf"), format='pretty-xml')
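# Minimal rdflib sketch (assumes an rdflib version with JSON-LD support)
# showing the add()/serialize() pattern used throughout the script above,
# with a made-up subject and label.
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDFS
g = Graph()
subject = URIRef("https://example.org/entity/placeholder")
g.add((subject, RDFS.label, Literal("Sample label", lang="en")))
print(g.serialize(format="json-ld"))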
| 29.338235
| 129
| 0.513116
| 743
| 5,985
| 4.095559
| 0.179004
| 0.061453
| 0.055866
| 0.035491
| 0.495892
| 0.394676
| 0.338153
| 0.338153
| 0.261255
| 0.253368
| 0
| 0.018863
| 0.326817
| 5,985
| 204
| 130
| 29.338235
| 0.736411
| 0.00685
| 0
| 0.198198
| 0
| 0
| 0.238463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.144144
| 0
| 0.144144
| 0.018018
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d4e94302ccb3b8bcc4d40fbc60872ee3780872
| 2,107
|
py
|
Python
|
client/tests/test_config_read_tool.py
|
nuft/can-bootloader
|
18dd77dae1fb2328dac1fd1df2c9e5d5c936771e
|
[
"BSD-2-Clause"
] | null | null | null |
client/tests/test_config_read_tool.py
|
nuft/can-bootloader
|
18dd77dae1fb2328dac1fd1df2c9e5d5c936771e
|
[
"BSD-2-Clause"
] | null | null | null |
client/tests/test_config_read_tool.py
|
nuft/can-bootloader
|
18dd77dae1fb2328dac1fd1df2c9e5d5c936771e
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
try:
from unittest.mock import *
except ImportError:
from mock import *
from msgpack import *
import bootloader_read_config
from commands import *
import sys
import json
class ReadConfigToolTestCase(unittest.TestCase):
@patch('utils.write_command_retry')
@patch('utils.write_command')
@patch('utils.open_connection')
@patch('builtins.print')
def test_integration(self, print_mock, open_conn, write_command,
write_command_retry):
sys.argv = "test.py -p /dev/ttyUSB0 0 1 2".split()
configs = [{'id': i} for i in range(3)]
write_command_retry.return_value = {
i: packb(configs[i]) for i in range(3)
}
open_conn.return_value = object()
bootloader_read_config.main()
write_command_retry.assert_any_call(open_conn.return_value,
encode_read_config(), [0, 1, 2])
all_configs = {i: configs[i] for i in range(3)}
print_mock.assert_any_call(json.dumps(all_configs, indent=4,
sort_keys=True))
@patch('utils.open_connection')
@patch('utils.write_command_retry')
@patch('utils.write_command')
@patch('utils.read_can_datagrams')
@patch('builtins.print')
def test_network_discovery(self, print_mock, read_can_datagram,
write_command, write_command_retry, open_conn):
"""
Checks if we can perform a whole network discovery.
"""
sys.argv = "test.py -p /dev/ttyUSB0 --all".split()
# The first two boards answer the ping
board_answers = [(b'', [0], i) for i in range(1, 3)] + [None]
read_can_datagram.return_value = iter(board_answers)
write_command_retry.return_value = {
i: packb({'id': i}) for i in range(1, 3)
}
bootloader_read_config.main()
write_command.assert_any_call(open_conn.return_value,
encode_ping(),
list(range(1, 128)))
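# Aside (illustrative only): the mocked write_command_retry above returns
# msgpack-encoded config dicts keyed by board id. A standalone round trip
# using the same msgpack API:
from msgpack import packb, unpackb
payload = packb({'id': 7})
assert unpackb(payload, raw=False) == {'id': 7}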
| 30.985294
| 78
| 0.596108
| 258
| 2,107
| 4.620155
| 0.329457
| 0.120805
| 0.099832
| 0.029362
| 0.514262
| 0.391779
| 0.317114
| 0.162752
| 0.098993
| 0.098993
| 0
| 0.014208
| 0.298529
| 2,107
| 67
| 79
| 31.447761
| 0.792287
| 0.04224
| 0
| 0.26087
| 0
| 0
| 0.122429
| 0.058204
| 0
| 0
| 0
| 0
| 0.065217
| 1
| 0.043478
| false
| 0
| 0.195652
| 0
| 0.26087
| 0.108696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d51a8133c12a74117e8b569f8ace23d5fb49e6
| 5,499
|
py
|
Python
|
bot.py
|
Pyrrolidine/letterboxd-bot
|
b2cd1364e00c3ec6fb70be9c8be7a8b707a8ffbe
|
[
"MIT"
] | 1
|
2021-03-14T20:01:53.000Z
|
2021-03-14T20:01:53.000Z
|
bot.py
|
Pyrrolidine/letterboxd-bot
|
b2cd1364e00c3ec6fb70be9c8be7a8b707a8ffbe
|
[
"MIT"
] | null | null | null |
bot.py
|
Pyrrolidine/letterboxd-bot
|
b2cd1364e00c3ec6fb70be9c8be7a8b707a8ffbe
|
[
"MIT"
] | null | null | null |
import logging
from asyncio import sleep
import discord
from discord.ext import commands
from config import SETTINGS
from crew import crew_embed
from diary import diary_embed
from film import film_embed
from helpers import LetterboxdError
from list_ import list_embed
from review import review_embed
from user import user_embed
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s | %(message)s',
datefmt='%m/%d %H:%M:%S')
bot = commands.Bot(command_prefix='!', case_insensitive=True)
bot.remove_command('help')
@bot.event
async def on_ready():
logging.info(
'Logged in %d servers as %s' % (len(bot.guilds), bot.user.name))
bot.loop.create_task(update_stats())
@bot.event
async def on_message(message):
if message.content.startswith('!'):
message.content = message.content.replace('’', '').replace('‘', '')
await bot.process_commands(message)
async def update_stats():
while True:
await bot.change_presence(
activity=discord.Game('!helplb - {} servers'.format(
len(bot.guilds))))
await sleep(900)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send('This command requires a parameter.')
elif isinstance(error, commands.BotMissingPermissions):
await ctx.send('This command requires the {} permission.'.format(
', '.join(err for err in error.missing_perms)))
elif isinstance(error, (commands.CommandNotFound, commands.CheckFailure)):
return
elif isinstance(error, commands.CommandInvokeError):
if isinstance(error.original, discord.HTTPException):
return
else:
await ctx.send('Sorry, the command crashed. :/')
logging.error(ctx.message.content)
raise error
async def send_msg(ctx, msg):
if isinstance(msg, discord.Embed):
await ctx.send(embed=msg)
else:
await ctx.send(msg)
# Commands
@bot.command()
async def helplb(ctx):
help_embed = discord.Embed(colour=discord.Color.from_rgb(54, 57, 62))
help_embed.set_thumbnail(url='https://i.imgur.com/Kr1diFu.png')
help_embed.set_author(
name='Letterboxd Bot', icon_url='https://i.imgur.com/5VALKVy.jpg')
help_embed.set_footer(
text='Created by Porkepik#2664',
icon_url='https://i.imgur.com/li4cLpd.png')
for key, value in SETTINGS['help'].items():
help_embed.add_field(name=key, value=value, inline=False)
help_embed.description = 'Invite Bot | '\
+ '[GitHub](https://github.com/Porkepik/Letterboxd-Bot)'
await ctx.send(embed=help_embed)
@bot.command()
async def user(ctx, username):
try:
msg = await user_embed(username)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command()
async def diary(ctx, username):
try:
msg = await diary_embed(username)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(aliases=['actor', 'actress', 'director'])
async def crew(ctx, *, arg):
try:
msg = await crew_embed(arg, ctx.invoked_with)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(aliases=['movie'])
async def film(ctx, *, arg):
try:
# eiga.me ratings for specific servers
if ctx.guild and ctx.guild.id in SETTINGS['mkdb_servers']:
msg = await film_embed(arg, True)
else:
msg = await film_embed(arg)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
async def check_if_two_args(ctx):
msg = ctx.message.content.split()
if len(msg) < 3:
await ctx.send('This command requires 2 parameters.')
return len(msg) > 2
@bot.command(name='list')
@commands.check(check_if_two_args)
async def list_(ctx, username, *args):
try:
msg = await list_embed(username, ' '.join(str(i) for i in args))
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(aliases=['entry'])
@commands.check(check_if_two_args)
async def review(ctx, username, *args):
try:
msg = await review_embed(username, ' '.join(str(i) for i in args))
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(name='del')
@commands.bot_has_permissions(manage_messages=True)
async def delete(ctx):
await ctx.message.delete()
found_bot_msg = False
found_usr_cmd = False
cmd_list = list()
for command in bot.commands:
cmd_list.append('!' + command.name)
for alias in command.aliases:
cmd_list.append('!' + alias)
async for log_message in ctx.channel.history(limit=30):
if log_message.author.id == bot.user.id and not found_bot_msg:
bot_message = log_message
found_bot_msg = True
elif found_bot_msg:
if log_message.content:
first_word = log_message.content.split()[0]
else:
continue
if first_word in cmd_list:
found_usr_cmd = True
cmd_message = log_message
break
if found_usr_cmd:
if not ctx.author.permissions_in(ctx.channel).manage_messages:
if not cmd_message.author.id == ctx.author.id:
return
await cmd_message.delete()
await bot_message.delete()
bot.run(SETTINGS['discord'])
| 28.640625
| 78
| 0.651755
| 731
| 5,499
| 4.771546
| 0.253078
| 0.03211
| 0.024083
| 0.026089
| 0.249713
| 0.205275
| 0.151663
| 0.151663
| 0.131594
| 0.131594
| 0
| 0.005221
| 0.233679
| 5,499
| 191
| 79
| 28.790576
| 0.822496
| 0.008183
| 0
| 0.256579
| 0
| 0
| 0.090626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078947
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d5cc7533855c3c985b9ccbdc0f7d78d12441b1
| 746
|
py
|
Python
|
Complab assignment.py
|
peteboi/Python-Scripts
|
d84e352c41cff3f459d88c83bc81f6dc2f25ed05
|
[
"MIT"
] | null | null | null |
Complab assignment.py
|
peteboi/Python-Scripts
|
d84e352c41cff3f459d88c83bc81f6dc2f25ed05
|
[
"MIT"
] | null | null | null |
Complab assignment.py
|
peteboi/Python-Scripts
|
d84e352c41cff3f459d88c83bc81f6dc2f25ed05
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def orbit(u):
x,y,v_x,v_y = u
r=np.hypot(x,y)
#r= 1.521e+06
#M,G=1.989e+30,6.7e-11
M,G=20,110
f=G*M/r**3
return np.array([v_x,v_y,-f*x,-f*y])
def RK4(f,u,dt):
k1=f(u)*dt
k2=f(u+0.5*k1)*dt
k3=f(u+0.5*k2)*dt
k4=f(u+k3)*dt
return u+(k1+2*k2+2*k3+k4)/6
def RK4_int(f,y0,tspan):
y=np.zeros([len(tspan),len(y0)])
y[0,:] =y0
for k in range (1,len(tspan)):
y[k,:] = RK4(f,y[k-1],tspan[k]-tspan[k-1])
return y
dt=0.1
t = np.arange(0,10,dt)
y0=np.array([10, 0.0, 10, 10])
sol_rk4=RK4_int(orbit,y0,t)
x,y,v_x,v_y = sol_rk4.T
plt.grid()
plt.plot(x,y)
plt.show()
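# Optional sanity check (illustrative, run after closing the plot window):
# RK4 is fourth-order accurate, so halving dt on a problem with a known
# solution (dy/dt = -y, y(0) = 1, exact y(t) = exp(-t)) should shrink the
# endpoint error by roughly a factor of 2**4 = 16.
decay = lambda u: -u
t_coarse = np.linspace(0, 2, 11)   # dt = 0.2
t_fine = np.linspace(0, 2, 21)     # dt = 0.1
err_coarse = abs(RK4_int(decay, np.array([1.0]), t_coarse)[-1, 0] - np.exp(-2.0))
err_fine = abs(RK4_int(decay, np.array([1.0]), t_fine)[-1, 0] - np.exp(-2.0))
print(err_coarse / err_fine)  # expect roughly 16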
| 19.128205
| 51
| 0.518767
| 174
| 746
| 2.166667
| 0.33908
| 0.026525
| 0.023873
| 0.03183
| 0.03183
| 0.03183
| 0
| 0
| 0
| 0
| 0
| 0.122995
| 0.247989
| 746
| 38
| 52
| 19.631579
| 0.54902
| 0.072386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d5e8ec4bcf85e917876d27f935eeb707d35bc9
| 675
|
py
|
Python
|
factory_generator/management/commands/generate_factories.py
|
gamabounty/django-factory-generator
|
284184b22f3564a7a915ac3f3363e588d3721158
|
[
"MIT"
] | 10
|
2019-04-19T03:00:09.000Z
|
2022-02-23T16:17:43.000Z
|
factory_generator/management/commands/generate_factories.py
|
charlesthk/django-factory-generator
|
cd0f7aa5b4ecc2bbe8f30a081238056c653d7265
|
[
"MIT"
] | 2
|
2020-05-10T00:40:51.000Z
|
2021-02-28T11:31:26.000Z
|
factory_generator/management/commands/generate_factories.py
|
charlesthk/django-factory-generator
|
cd0f7aa5b4ecc2bbe8f30a081238056c653d7265
|
[
"MIT"
] | 6
|
2019-12-19T16:26:00.000Z
|
2021-05-13T23:42:35.000Z
|
import os
from django.apps import apps
from django.core.management.base import BaseCommand
from factory_generator.generator import FactoryAppGenerator
class Command(BaseCommand):
help = 'Create model factories for all installed apps'
def handle(self, *args, **options):
created_files = []
for app in apps.get_app_configs():
factory_app_generator = FactoryAppGenerator(app)
created_files += factory_app_generator.create_files()
self.stdout.write(self.style.SUCCESS('Successfully created factories:'))
for created_file in created_files:
self.stdout.write(self.style.SUCCESS('- ' + created_file))
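# Usage note (standard Django management-command behaviour, for context):
# because this module lives at
# factory_generator/management/commands/generate_factories.py, it is exposed
# as a manage.py subcommand once the app is installed:
#
#     python manage.py generate_factories
#
# which iterates over every installed app and writes the generated factory
# modules reported by FactoryAppGenerator.create_files().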
| 33.75
| 80
| 0.715556
| 80
| 675
| 5.875
| 0.475
| 0.076596
| 0.080851
| 0.085106
| 0.153191
| 0.153191
| 0.153191
| 0
| 0
| 0
| 0
| 0
| 0.198519
| 675
| 19
| 81
| 35.526316
| 0.868762
| 0
| 0
| 0
| 0
| 0
| 0.115556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9d80db949c5d5f415b809076411a2404da55e53
| 10,912
|
py
|
Python
|
sympy/combinatorics/testutil.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 2
|
2019-05-18T22:36:49.000Z
|
2019-05-24T05:56:16.000Z
|
sympy/combinatorics/testutil.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 1
|
2020-04-22T12:45:26.000Z
|
2020-04-22T12:45:26.000Z
|
sympy/combinatorics/testutil.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 3
|
2021-02-16T16:40:49.000Z
|
2022-03-07T18:28:41.000Z
|
from sympy.combinatorics import Permutation
from sympy.combinatorics.util import _distribute_gens_by_base
rmul = Permutation.rmul
def _cmp_perm_lists(first, second):
"""
Compare two lists of permutations as sets.
This is used for testing purposes. Since the array form of a
permutation is currently a list, Permutation is not hashable
and cannot be put into a set.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _cmp_perm_lists
>>> a = Permutation([0, 2, 3, 4, 1])
>>> b = Permutation([1, 2, 0, 4, 3])
>>> c = Permutation([3, 4, 0, 1, 2])
>>> ls1 = [a, b, c]
>>> ls2 = [b, c, a]
>>> _cmp_perm_lists(ls1, ls2)
True
"""
return {tuple(a) for a in first} == \
{tuple(a) for a in second}
def _naive_list_centralizer(self, other, af=False):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Return a list of elements for the centralizer of a subgroup/set/element.
This is a brute force implementation that goes over all elements of the
group and checks for membership in the centralizer. It is used to
test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``.
Examples
========
>>> from sympy.combinatorics.testutil import _naive_list_centralizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> _naive_list_centralizer(D, D)
[Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])]
See Also
========
sympy.combinatorics.perm_groups.centralizer
"""
from sympy.combinatorics.permutations import _af_commutes_with
if hasattr(other, 'generators'):
elements = list(self.generate_dimino(af=True))
gens = [x._array_form for x in other.generators]
commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens)
centralizer_list = []
if not af:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(Permutation._af_new(element))
else:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(element)
return centralizer_list
elif hasattr(other, '__getitem__'):
return _naive_list_centralizer(self, PermutationGroup(other), af)
elif hasattr(other, 'array_form'):
return _naive_list_centralizer(self, PermutationGroup([other]), af)
def _verify_bsgs(group, base, gens):
"""
Verify the correctness of a base and strong generating set.
This is a naive implementation using the definition of a base and a strong
generating set relative to it. There are other procedures for
verifying a base and strong generating set, but this one will
serve for more robust testing.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> _verify_bsgs(A, A.base, A.strong_gens)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
"""
from sympy.combinatorics.perm_groups import PermutationGroup
strong_gens_distr = _distribute_gens_by_base(base, gens)
current_stabilizer = group
for i in range(len(base)):
candidate = PermutationGroup(strong_gens_distr[i])
if current_stabilizer.order() != candidate.order():
return False
current_stabilizer = current_stabilizer.stabilizer(base[i])
if current_stabilizer.order() != 1:
return False
return True
def _verify_centralizer(group, arg, centr=None):
"""
Verify the centralizer of a group/set/element inside another group.
This is used for testing ``.centralizer()`` from
``sympy.combinatorics.perm_groups``
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _verify_centralizer
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])])
>>> _verify_centralizer(S, A, centr)
True
See Also
========
_naive_list_centralizer,
sympy.combinatorics.perm_groups.PermutationGroup.centralizer,
_cmp_perm_lists
"""
if centr is None:
centr = group.centralizer(arg)
centr_list = list(centr.generate_dimino(af=True))
centr_list_naive = _naive_list_centralizer(group, arg, af=True)
return _cmp_perm_lists(centr_list, centr_list_naive)
def _verify_normal_closure(group, arg, closure=None):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Verify the normal closure of a subgroup/subset/element in a group.
This is used to test
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_normal_closure
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> _verify_normal_closure(S, A, closure=A)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
"""
if closure is None:
closure = group.normal_closure(arg)
conjugates = set()
if hasattr(arg, 'generators'):
subgr_gens = arg.generators
elif hasattr(arg, '__getitem__'):
subgr_gens = arg
elif hasattr(arg, 'array_form'):
subgr_gens = [arg]
for el in group.generate_dimino():
for gen in subgr_gens:
conjugates.add(gen ^ el)
naive_closure = PermutationGroup(list(conjugates))
return closure.is_subgroup(naive_closure)
def canonicalize_naive(g, dummies, sym, *v):
"""
Canonicalize tensor formed by tensors of the different types
g permutation representing the tensor
dummies list of dummy indices
msym symmetry of the metric
v is a list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i BSGS for tensors of this type
n_i number ot tensors of type `i`
sym_i symmetry under exchange of two component tensors of type `i`
None no symmetry
0 commuting
1 anticommuting
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Examples
========
>>> from sympy.combinatorics.testutil import canonicalize_naive
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = Permutation([1, 3, 2, 0, 4, 5])
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0))
[0, 2, 1, 3, 4, 5]
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.tensor_can import gens_products, dummy_sgs
from sympy.combinatorics.permutations import Permutation, _af_rmul
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
v1.append((base_i, gens_i, [[]]*n_i, sym_i))
size, sbase, sgens = gens_products(*v1)
dgens = dummy_sgs(dummies, sym, size-2)
if isinstance(sym, int):
num_types = 1
dummies = [dummies]
sym = [sym]
else:
num_types = len(sym)
dgens = []
for i in range(num_types):
dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2))
S = PermutationGroup(sgens)
D = PermutationGroup([Permutation(x) for x in dgens])
dlist = list(D.generate(af=True))
g = g.array_form
st = set()
for s in S.generate(af=True):
h = _af_rmul(g, s)
for d in dlist:
q = tuple(_af_rmul(d, h))
st.add(q)
a = list(st)
a.sort()
prev = (0,)*size
for h in a:
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
prev = h
return list(a[0])
def graph_certificate(gr):
"""
Return a certificate for the graph
gr adjacency list
The graph is assumed to be unoriented and without
external lines.
Associate to each vertex of the graph a symmetric tensor with
number of indices equal to the degree of the vertex; indices
are contracted when they correspond to the same line of the graph.
The canonical form of the tensor gives a certificate for the graph.
This is not an efficient algorithm to get the certificate of a graph.
Examples
========
>>> from sympy.combinatorics.testutil import graph_certificate
>>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]}
>>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]}
>>> c1 = graph_certificate(gr1)
>>> c2 = graph_certificate(gr2)
>>> c1
[0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21]
>>> c1 == c2
True
"""
from sympy.combinatorics.permutations import _af_invert
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
items = list(gr.items())
items.sort(key=lambda x: len(x[1]), reverse=True)
pvert = [x[0] for x in items]
pvert = _af_invert(pvert)
# the indices of the tensor are twice the number of lines of the graph
num_indices = 0
for v, neigh in items:
num_indices += len(neigh)
# associate to each vertex its indices; for each line
# between two vertices assign the
# even index to the vertex which comes first in items,
# the odd index to the other vertex
vertices = [[] for i in items]
i = 0
for v, neigh in items:
for v2 in neigh:
if pvert[v] < pvert[v2]:
vertices[pvert[v]].append(i)
vertices[pvert[v2]].append(i+1)
i += 2
g = []
for v in vertices:
g.extend(v)
assert len(g) == num_indices
g += [num_indices, num_indices + 1]
size = num_indices + 2
assert sorted(g) == list(range(size))
g = Permutation(g)
vlen = [0]*(len(vertices[0])+1)
for neigh in vertices:
vlen[len(neigh)] += 1
v = []
for i in range(len(vlen)):
n = vlen[i]
if n:
base, gens = get_symmetric_group_sgs(i)
v.append((base, gens, n, 0))
v.reverse()
dummies = list(range(num_indices))
can = canonicalize(g, dummies, 0, *v)
return can
| 32.47619
| 98
| 0.641679
| 1,482
| 10,912
| 4.588394
| 0.176113
| 0.09
| 0.093824
| 0.049412
| 0.340147
| 0.284853
| 0.215147
| 0.162647
| 0.123824
| 0.123824
| 0
| 0.023035
| 0.248076
| 10,912
| 335
| 99
| 32.573134
| 0.805728
| 0.363361
| 0
| 0.101449
| 0
| 0
| 0.011052
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 1
| 0.050725
| false
| 0
| 0.07971
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9da1ced032a66e58537bdeecea30c322d1a2f01
| 644
|
py
|
Python
|
malleefowl/tests/test_wps_caps.py
|
Ouranosinc/malleefowl
|
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
|
[
"Apache-2.0"
] | null | null | null |
malleefowl/tests/test_wps_caps.py
|
Ouranosinc/malleefowl
|
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
|
[
"Apache-2.0"
] | 4
|
2017-09-21T17:14:45.000Z
|
2020-11-11T03:20:42.000Z
|
malleefowl/tests/test_wps_caps.py
|
Ouranosinc/malleefowl
|
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from pywps import Service
from pywps.tests import assert_response_success
from .common import client_for
from malleefowl.processes import processes
def test_wps_caps():
client = client_for(Service(processes=processes))
resp = client.get(service='wps', request='getcapabilities', version='1.0.0')
names = resp.xpath_text('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Identifier')
assert sorted(names.split()) == [
'download',
'esgsearch',
'thredds_download',
'workflow'
]
| 28
| 80
| 0.608696
| 65
| 644
| 5.907692
| 0.6
| 0.046875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006508
| 0.284161
| 644
| 22
| 81
| 29.272727
| 0.826464
| 0
| 0
| 0
| 0
| 0
| 0.200311
| 0.032609
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.055556
| false
| 0
| 0.277778
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9dd8d48aa39f42683555f052c81e9f33f26c3cd
| 1,835
|
py
|
Python
|
setup.py
|
CallumJHays/pyngrok
|
e1a28948d1d8cf42f8eed1b166a2caf6b2a68066
|
[
"MIT"
] | null | null | null |
setup.py
|
CallumJHays/pyngrok
|
e1a28948d1d8cf42f8eed1b166a2caf6b2a68066
|
[
"MIT"
] | null | null | null |
setup.py
|
CallumJHays/pyngrok
|
e1a28948d1d8cf42f8eed1b166a2caf6b2a68066
|
[
"MIT"
] | null | null | null |
from setuptools import setup
__author__ = "Alex Laird"
__copyright__ = "Copyright 2019, Alex Laird"
__version__ = "1.4.0"
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="pyngrok",
version=__version__,
packages=["pyngrok"],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=[
"future",
"pyyaml"
],
entry_points="""
[console_scripts]
ngrok=pyngrok.ngrok:run
""",
description="A Python wrapper for Ngrok.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Alex Laird",
author_email="[email protected]",
url="https://github.com/alexdlaird/pyngrok",
download_url="https://github.com/alexdlaird/pyngrok/archive/{}.tar.gz".format(__version__),
keywords=["ngrok", "tunnel", "tunneling", "webhook", "localhost"],
license="MIT",
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix"
]
)
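# Usage note (standard setuptools behaviour, for context): installing the
# package, e.g. with `pip install .`, registers the console_scripts entry
# point declared above, so an `ngrok` command on the PATH dispatches to
# pyngrok.ngrok:run.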
| 34.622642
| 95
| 0.611444
| 184
| 1,835
| 5.923913
| 0.532609
| 0.122018
| 0.16055
| 0.095413
| 0.062385
| 0.062385
| 0
| 0
| 0
| 0
| 0
| 0.01983
| 0.230518
| 1,835
| 52
| 96
| 35.288462
| 0.752125
| 0
| 0
| 0
| 0
| 0.020408
| 0.572752
| 0.023978
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020408
| 0
| 0.020408
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ddd794f7ce3da3377a0064524099ee9b8e1fd8
| 1,377
|
py
|
Python
|
pipelines/trackml.py
|
texasmichelle/kubeflow-cern
|
886925fad5c37a72f6999c1100584fa8e4a0adae
|
[
"Apache-2.0"
] | 4
|
2019-06-06T20:10:08.000Z
|
2021-02-19T11:59:39.000Z
|
pipelines/trackml.py
|
texasmichelle/kubeflow-cern
|
886925fad5c37a72f6999c1100584fa8e4a0adae
|
[
"Apache-2.0"
] | null | null | null |
pipelines/trackml.py
|
texasmichelle/kubeflow-cern
|
886925fad5c37a72f6999c1100584fa8e4a0adae
|
[
"Apache-2.0"
] | 1
|
2019-10-13T03:51:16.000Z
|
2019-10-13T03:51:16.000Z
|
#!/usr/bin/env python3
import kfp.dsl as dsl
import kfp.gcp as gcp
# Pipeline input variables.
KUBECTL_IMAGE = "gcr.io/mcas-195423/trackml_master_kfp_kubectl"
KUBECTL_IMAGE_VERSION = "1"
TRACKML_IMAGE = "gcr.io/mcas-195423/trackml_master_trackml"
TRACKML_IMAGE_VERSION = "1"
def train_op():
return dsl.ContainerOp(
name='train',
image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION),
command=["python"],
arguments=["train.py"],
).apply(gcp.use_gcp_secret()
)#.set_gpu_limit(1)
def serve_op():
return dsl.ContainerOp(
name='serve',
image="{}:{}".format(KUBECTL_IMAGE, KUBECTL_IMAGE_VERSION),
arguments=[
"/src/set_kubectl.sh",
"--namespace", "kubeflow",
"--command", "apply -f /src/k8s/serve.yaml",
]
).apply(gcp.use_gcp_secret())
def resultsgen_op():
return dsl.ContainerOp(
name='resultsgen',
image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION),
command=["python"],
arguments=["resultsgen.py"],
).apply(gcp.use_gcp_secret())
@dsl.pipeline(
name='trackml',
description='A pipeline that predicts particle tracks'
)
def trackml():
train = train_op()
serve = serve_op()
serve.after(train)
resultsgen = resultsgen_op()
resultsgen.after(serve)
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(trackml, __file__ + '.tar.gz')
| 24.157895
| 63
| 0.688453
| 175
| 1,377
| 5.148571
| 0.354286
| 0.079911
| 0.063263
| 0.073252
| 0.372919
| 0.264151
| 0.215316
| 0.142064
| 0.142064
| 0.142064
| 0
| 0.014518
| 0.149601
| 1,377
| 56
| 64
| 24.589286
| 0.75491
| 0.046478
| 0
| 0.204545
| 0
| 0
| 0.223835
| 0.065699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.068182
| 0.068182
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9de866f5c692eb5d2ae261f2a1854febddba480
| 2,211
|
py
|
Python
|
bin/ticker.py
|
aleasoluciones/infrabbitmq
|
2759590156c63b9a04fb5daf8d588a084fc30629
|
[
"MIT"
] | null | null | null |
bin/ticker.py
|
aleasoluciones/infrabbitmq
|
2759590156c63b9a04fb5daf8d588a084fc30629
|
[
"MIT"
] | null | null | null |
bin/ticker.py
|
aleasoluciones/infrabbitmq
|
2759590156c63b9a04fb5daf8d588a084fc30629
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import puka
import argparse
import logging
from infcommon import utils
from infrabbitmq import factory as infrabbitmq_factory
from infrabbitmq.rabbitmq import RabbitMQError
from infrabbitmq.events_names import (
TICK_1_SECOND,
TICK_1_MINUTE,
TICK_2_MINUTES,
TICK_5_MINUTES,
TICK_60_MINUTES,
)
def publish_event(publisher, event, network, secs, mins):
logging.info("publish event {} {}".format(event, secs))
publisher.publish(event, network, data={'tick': secs, 'mins': mins})
def main(network):
publisher = infrabbitmq_factory.event_publisher_json_serializer()
secs = 0
mins = 0
rabbitmq_exceptions = (RabbitMQError, puka.AMQPError, KeyError,)
while True:
time.sleep(1)
secs += 1
utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions,
publish_event,
publisher, TICK_1_SECOND, network, secs, mins)
if secs % 60 == 0:
mins += 1
secs = 0
utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions,
publish_event,
publisher, TICK_1_MINUTE, network, secs, mins)
if mins % 2 == 0:
utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions,
publish_event,
publisher, TICK_2_MINUTES, network, secs, mins)
if mins % 5 == 0:
utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions,
publish_event,
publisher, TICK_5_MINUTES, network, secs, mins)
if mins % 60 == 0:
utils.do_stuff_with_exponential_backoff(rabbitmq_exceptions,
publish_event,
publisher, TICK_60_MINUTES, network, secs, mins)
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--network', action='store', required=True, help='Network name (ilo, c2k, ...)')
args = parser.parse_args()
network = args.network.split('-')[0]
main(network)
except Exception as exc:
logging.critical("Ticker Fails: {}".format(exc))
| 29.878378
| 114
| 0.622795
| 245
| 2,211
| 5.342857
| 0.302041
| 0.073338
| 0.096257
| 0.061115
| 0.375859
| 0.341482
| 0.298701
| 0.298701
| 0.298701
| 0.298701
| 0
| 0.019595
| 0.284487
| 2,211
| 73
| 115
| 30.287671
| 0.807838
| 0.009498
| 0
| 0.218182
| 0
| 0
| 0.043876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.145455
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9df003e9cd20fdfdd89b5aaebba29cdc7e644c5
| 16,137
|
py
|
Python
|
transformers/modeling_encoder_decoder.py
|
Tarpelite/UniNLP
|
176c2a0f88c8054bf69e1f92693d353737367c34
|
[
"MIT"
] | null | null | null |
transformers/modeling_encoder_decoder.py
|
Tarpelite/UniNLP
|
176c2a0f88c8054bf69e1f92693d353737367c34
|
[
"MIT"
] | 3
|
2021-06-02T00:41:41.000Z
|
2022-02-10T01:07:59.000Z
|
transformers/modeling_encoder_decoder.py
|
Tarpelite/UniNLP
|
176c2a0f88c8054bf69e1f92693d353737367c34
|
[
"MIT"
] | 1
|
2020-01-27T03:02:19.000Z
|
2020-01-27T03:02:19.000Z
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import torch
from torch import nn
from .modeling_auto import AutoModel, AutoModelWithLMHead
logger = logging.getLogger(__name__)
class PreTrainedEncoderDecoder(nn.Module):
r"""
:class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be
instantiated as a transformer architecture with one of the base model
classes of the library as encoder and (optionally) another one as
decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
"""
def __init__(self, encoder, decoder):
super(PreTrainedEncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
@classmethod
def from_pretrained(
cls,
encoder_pretrained_model_name_or_path=None,
decoder_pretrained_model_name_or_path=None,
*model_args,
**kwargs
):
r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you need to first set it back in training mode with `model.train()`
Params:
encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
- the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
You can specify kwargs specific to the encoder and decoder by prefixing the key with `encoder_` and `decoder_` respectively (e.g. ``decoder_output_attention=True``). The remaining kwargs will be passed to both encoders and decoders.
Examples::
model = PreTrainedEncoderDecoder.from_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
"""
# keyword arguments come in 3 flavors: encoder-specific (prefixed by
# `encoder_`), decoder-specific (prefixed by `decoder_`) and those
# that apply to the model as a whole.
# We let the specific kwargs override the common ones in case of conflict.
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_")
and not argument.startswith("decoder_")
}
kwargs_decoder = kwargs_common.copy()
kwargs_encoder = kwargs_common.copy()
kwargs_encoder.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
kwargs_decoder.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
encoder = AutoModel.from_pretrained(
encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
)
encoder.config.is_decoder = False
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
decoder = AutoModelWithLMHead.from_pretrained(
decoder_pretrained_model_name_or_path, **kwargs_decoder
)
decoder.config.is_decoder = True
model = cls(encoder, decoder)
return model
def save_pretrained(self, save_directory):
""" Save a Seq2Seq model and its configuration file in a format such
that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained`
We save the encoder's and decoder's parameters in two separate directories.
"""
self.encoder.save_pretrained(os.path.join(save_directory, "encoder"))
self.decoder.save_pretrained(os.path.join(save_directory, "decoder"))
def forward(self, encoder_input_ids, decoder_input_ids, **kwargs):
""" The forward pass on a seq2eq depends what we are performing:
- During training we perform one forward pass through both the encoder
and decoder;
- During prediction, we perform one forward pass through the encoder,
and then perform several forward passes with the encoder's hidden
state through the decoder to decode a full sequence.
Therefore, we skip the forward pass on the encoder if an argument named
`encoder_hidden_state` is passed to this function.
Params:
encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of encoder input sequence tokens in the vocabulary.
decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of decoder input sequence tokens in the vocabulary.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
"""
# keyword arguments come in 3 flavors: encoder-specific (prefixed by
# `encoder_`), decoder-specific (prefixed by `decoder_`) and those
# that apply to the model as a whole.
# We let the specific kwargs override the common ones in case of conflict.
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_")
and not argument.startswith("decoder_")
}
kwargs_decoder = kwargs_common.copy()
kwargs_encoder = kwargs_common.copy()
kwargs_encoder.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
kwargs_decoder.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
# Encode if needed (training, first prediction pass)
encoder_hidden_states = kwargs_encoder.pop("hidden_states", None)
if encoder_hidden_states is None:
encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder)
encoder_hidden_states = encoder_outputs[
0
] # output the last layer hidden state
else:
encoder_outputs = ()
# Decode
kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states
kwargs_decoder["encoder_attention_mask"] = kwargs_encoder.get(
"attention_mask", None
)
decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder)
return decoder_outputs + encoder_outputs
class Model2Model(PreTrainedEncoderDecoder):
r"""
:class:`~transformers.Model2Model` instantiates a Seq2Seq model
where both the encoder and decoder are of the same family. If the
name of or path to a pretrained model is specified, the encoder and
the decoder will be initialized with the pretrained weights (the
cross-attention will be initialized randomly if its weights are not
present).
It is possible to override this behavior and initialize, say, the decoder randomly
by creating it beforehand as follows
config = BertConfig.from_pretrained()
decoder = BertForMaskedLM(config)
model = Model2Model.from_pretrained('bert-base-uncased', decoder_model=decoder)
"""
def __init__(self, *args, **kwargs):
super(Model2Model, self).__init__(*args, **kwargs)
self.tie_weights()
def tie_weights(self):
""" Tying the encoder and decoders' embeddings together.
We need for each to get down to the embedding weights. However the
different model classes are inconsistent in that respect:
- BertModel: embeddings.word_embeddings
- RoBERTa: embeddings.word_embeddings
- XLMModel: embeddings
- GPT2: wte
- BertForMaskedLM: bert.embeddings.word_embeddings
- RobertaForMaskedLM: roberta.embeddings.word_embeddings
argument of the XEmbedding layer for each model, but it is "blocked"
by a model-specific keyword (bert, )...
"""
# self._tie_or_clone_weights(self.encoder, self.decoder)
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
if (
"bert" not in pretrained_model_name_or_path
or "roberta" in pretrained_model_name_or_path
or "distilbert" in pretrained_model_name_or_path
):
raise ValueError("Only the Bert model is currently supported.")
model = super(Model2Model, cls).from_pretrained(
encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
*args,
**kwargs
)
return model
class Model2LSTM(PreTrainedEncoderDecoder):
@classmethod
def from_pretrained(cls, *args, **kwargs):
if kwargs.get("decoder_model", None) is None:
# We will create a randomly initialized LSTM model as decoder
if "decoder_config" not in kwargs:
raise ValueError(
"To load an LSTM in Encoder-Decoder model, please supply either: "
" - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or"
" - a dictionary of configuration parameters that will be used to initialize a"
" torch.nn.LSTM model as `decoder_config` keyword argument. "
" E.g. `decoder_config={'input_size': 768, 'hidden_size': 768, 'num_layers': 2}`"
)
kwargs["decoder_model"] = torch.nn.LSTM(kwargs.pop("decoder_config"))
model = super(Model2LSTM, cls).from_pretrained(*args, **kwargs)
return model
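A minimal usage sketch (not part of the original module) of the encoder_/decoder_ prefix routing documented above; it assumes the surrounding transformers package is installed, that a 'bert-base-uncased' checkpoint can be downloaded, and that _example_model2model_forward is a hypothetical helper name.
def _example_model2model_forward(encoder_ids, decoder_ids, attention_mask):
    # encoder_ids / decoder_ids: torch.LongTensor of shape (batch, seq_len).
    model = Model2Model.from_pretrained("bert-base-uncased")
    # Kwargs prefixed with ``encoder_`` are stripped and passed to the encoder
    # only; forward() then reuses the encoder attention mask as the decoder's
    # ``encoder_attention_mask`` (see the forward pass above).
    return model(encoder_ids, decoder_ids, encoder_attention_mask=attention_mask)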
| 51.88746
| 473
| 0.656008
| 1,947
| 16,137
| 5.30303
| 0.216744
| 0.02615
| 0.029443
| 0.032542
| 0.343923
| 0.330073
| 0.302954
| 0.255303
| 0.244649
| 0.237482
| 0
| 0.003413
| 0.273781
| 16,137
| 310
| 474
| 52.054839
| 0.877635
| 0.580653
| 0
| 0.296296
| 0
| 0.007407
| 0.120644
| 0.01786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059259
| false
| 0.007407
| 0.044444
| 0
| 0.155556
| 0.007407
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9e182705452fe461a2142c0afa4786d47f19c46
| 2,131
|
py
|
Python
|
dags/treinos_igti/treino03.py
|
rafaelols/airflow
|
8e4af5fb576a9568af476c0607819649b724adea
|
[
"Apache-2.0"
] | null | null | null |
dags/treinos_igti/treino03.py
|
rafaelols/airflow
|
8e4af5fb576a9568af476c0607819649b724adea
|
[
"Apache-2.0"
] | null | null | null |
dags/treinos_igti/treino03.py
|
rafaelols/airflow
|
8e4af5fb576a9568af476c0607819649b724adea
|
[
"Apache-2.0"
] | null | null | null |
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import datetime, timedelta
import pandas as pd
import random
# Default args definition
default_args = {
'owner': 'Rafael',
'depends_on_past': False,
'start_date': datetime(2020, 11, 29, 18, 20),
'email': ['[email protected]', '[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=1)  # lowercase key so Airflow actually applies the retry delay
}
# Dag definition
dag = DAG(
'treino-03',
description="Extrai dados do Titanic e calcula idade media para homens ou mulheres",
default_args = default_args,
schedule_interval='*/20 * * * *'
)
get_data = BashOperator(
task_id='get-data',
bash_command='curl https://raw.githubusercontent.com/A3Data/hermione/master/hermione/file_text/train.csv -o /usr/local/airflow/data/train.csv',
dag=dag
)
def sorteia_h_m():
return random.choice(['male', 'female'])
escolhe_h_m = PythonOperator(
task_id='escolhe-h-m',
python_callable=sorteia_h_m,
dag=dag
)
def MouF(**context):
value=context['task_instance'].xcom_pull(task_ids='escolhe-h-m')
if value == 'male':
return 'branch_homem'
else:
return 'branch_mulher'
male_female = BranchPythonOperator(
task_id='condicional',
python_callable=MouF,
provide_context=True,
dag=dag
)
def mean_homem():
df = pd.read_csv('/usr/local/airflow/data/train.csv')
med = df.loc[df.Sex == 'male'].Age.mean()
print(f'Media de idade dos homens no Titanic: {med}')
branch_homem = PythonOperator(
task_id='branch_homem',
python_callable=mean_homem,
dag=dag
)
def mean_mulher():
df = pd.read_csv('/usr/local/airflow/data/train.csv')
med = df.loc[df.Sex == 'female'].Age.mean()
print(f'Media de idade das mulheres no Titanic: {med}')
branch_mulher = PythonOperator(
task_id='branch_mulher',
python_callable=mean_mulher,
dag=dag
)
get_data >> escolhe_h_m >> male_female >> [branch_homem, branch_mulher]
| 25.987805
| 147
| 0.697325
| 290
| 2,131
| 4.934483
| 0.406897
| 0.025157
| 0.025157
| 0.039832
| 0.125087
| 0.125087
| 0.106219
| 0.071279
| 0.071279
| 0.071279
| 0
| 0.011878
| 0.170343
| 2,131
| 81
| 148
| 26.308642
| 0.797511
| 0.017832
| 0
| 0.107692
| 0
| 0.015385
| 0.300478
| 0.047368
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.092308
| 0.015385
| 0.2
| 0.030769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9e359c85a06bdc44937457ee401aaa8bebc7f50
| 2,560
|
py
|
Python
|
tclCommands/TclCommandListSys.py
|
DannyPol/flatcam
|
25a8634d0658e98b7fae31a095f8bef40c1b3067
|
[
"MIT"
] | 1
|
2022-02-11T06:19:34.000Z
|
2022-02-11T06:19:34.000Z
|
tclCommands/TclCommandListSys.py
|
MRemy2/FlatCam
|
d4f941335ca8a8d5351aab23b396f99da06a9029
|
[
"MIT"
] | null | null | null |
tclCommands/TclCommandListSys.py
|
MRemy2/FlatCam
|
d4f941335ca8a8d5351aab23b396f99da06a9029
|
[
"MIT"
] | null | null | null |
# ##########################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# File Author: Marius Adrian Stanciu (c) #
# Date: 8/17/2019 #
# MIT Licence #
# ##########################################################
from tclCommands.TclCommand import *
class TclCommandListSys(TclCommand):
"""
Tcl shell command to get the list of system variables
example:
list_sys
"""
# List of all command aliases, to be able to use old names for backward compatibility (add_poly, add_polygon)
aliases = ['list_sys', 'listsys']
description = '%s %s' % ("--", "Outputs in Tcl Shell the list with the names of system variables.")
# Dictionary of types from Tcl command, needs to be ordered
arg_names = collections.OrderedDict([
('selection', str),
])
# Dictionary of types from Tcl command, needs to be ordered , this is for options like -optionname value
option_types = collections.OrderedDict([
])
# array of mandatory options for current Tcl command: required = {'name','outname'}
required = []
# structured help for current command, args needs to be ordered
help = {
'main': "Returns the list of the names of system variables.\n"
"Without an argument it will list all the system parameters. "
"As an argument use first letter or first letters from the name "
"of the system variable.\n"
"In that case it will list only the system variables that starts with that string.\n"
"Main categories start with: gerber or excellon or geometry or cncjob or global.\n"
"Note: Use 'get_sys system variable' to get the value and 'set_sys system variable value' to set it.\n",
'args': collections.OrderedDict([
]),
'examples': ['list_sys',
'list_sys ser',
'list_sys gerber',
'list_sys cncj']
}
def execute(self, args, unnamed_args):
"""
:param args:
:param unnamed_args:
:return:
"""
if 'selection' in args:
argument = args['selection']
return str([k for k in self.app.defaults.keys() if str(k).startswith(str(argument))])
else:
ret_val = list(self.app.defaults.keys())
return str(ret_val)
# return str([*self.app.defaults])
| 37.647059
| 120
| 0.55
| 290
| 2,560
| 4.8
| 0.437931
| 0.030172
| 0.036638
| 0.034483
| 0.103448
| 0.067529
| 0.067529
| 0.067529
| 0.067529
| 0.067529
| 0
| 0.004585
| 0.318359
| 2,560
| 67
| 121
| 38.208955
| 0.793123
| 0.3125
| 0
| 0.0625
| 0
| 0.03125
| 0.410863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.03125
| 0
| 0.34375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9e3f1d0e6ad9650ceb745dc1536525917eaef63
| 2,694
|
py
|
Python
|
ogs5py/fileclasses/mcp/core.py
|
MuellerSeb/ogs5py
|
752e7bd2298fbd476406d168f6b7d1a85863dccd
|
[
"MIT"
] | 3
|
2018-05-27T15:39:07.000Z
|
2018-10-29T17:02:11.000Z
|
ogs5py/fileclasses/mcp/core.py
|
MuellerSeb/ogs5py
|
752e7bd2298fbd476406d168f6b7d1a85863dccd
|
[
"MIT"
] | 1
|
2018-11-12T11:32:12.000Z
|
2018-11-12T13:07:48.000Z
|
ogs5py/fileclasses/mcp/core.py
|
MuellerSeb/ogs5py
|
752e7bd2298fbd476406d168f6b7d1a85863dccd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Class for the ogs COMPONENT_PROPERTIES file."""
from ogs5py.fileclasses.base import BlockFile
class MCP(BlockFile):
"""
Class for the ogs COMPONENT_PROPERTIES file.
Parameters
----------
task_root : str, optional
Path to the destination model folder.
Default: cwd+"ogs5model"
task_id : str, optional
Name for the ogs task.
Default: "model"
Notes
-----
Main-Keywords (#):
- COMPONENT_PROPERTIES
Sub-Keywords ($) per Main-Keyword:
- COMPONENT_PROPERTIES
- ACENTRIC_FACTOR
- A_ZERO
- BUBBLE_VELOCITY
- CRITICAL_PRESSURE
- CRITICAL_TEMPERATURE
- DECAY
- DIFFUSION
- FLUID_ID
- FLUID_PHASE
- FORMULA
- ISOTHERM
- MAXIMUM_AQUEOUS_SOLUBILITY
- MINERAL_DENSITY
- MOBILE
- MOLAR_DENSITY
- MOLAR_VOLUME
- MOLAR_WEIGHT
- MOL_MASS
- NAME
- OutputMassOfComponentInModel
- TRANSPORT_PHASE
- VALENCE
- VOLUME_DIFFUSION
Standard block:
None
Keyword documentation:
https://ogs5-keywords.netlify.com/ogs/wiki/public/doc-auto/by_ext/mcp
Reading routines:
https://github.com/ufz/ogs5/blob/master/FEM/rfmat_cp.cpp#L269
See Also
--------
add_block
"""
MKEYS = ["COMPONENT_PROPERTIES"]
# sorted
SKEYS = [
[
"NAME",
"FORMULA",
"MOBILE",
"TRANSPORT_PHASE",
"FLUID_PHASE",
"MOL_MASS",
"CRITICAL_PRESSURE",
"CRITICAL_TEMPERATURE",
"ACENTRIC_FACTOR",
"FLUID_ID",
"MOLAR_VOLUME",
"VOLUME_DIFFUSION",
"MINERAL_DENSITY",
"DIFFUSION",
"DECAY",
"ISOTHERM",
"BUBBLE_VELOCITY",
"MOLAR_DENSITY",
"MOLAR_WEIGHT",
"MAXIMUM_AQUEOUS_SOLUBILITY",
"OutputMassOfComponentInModel",
"VALENCE",
"A_ZERO",
"CRITICAL_VOLUME", # really?
"CRITICAL_DENSITY", # really?
"COMP_CAPACITY", # really?
"COMP_CONDUCTIVITY", # really?
"SOLUTE", # really?
"MOLECULAR_WEIGHT", # really?
]
]
STD = {}
def __init__(self, **OGS_Config):
super().__init__(**OGS_Config)
self.file_ext = ".mcp"
| 25.415094
| 78
| 0.485523
| 213
| 2,694
| 5.868545
| 0.516432
| 0.076
| 0.0216
| 0.0224
| 0.0592
| 0.0592
| 0.0592
| 0
| 0
| 0
| 0
| 0.005105
| 0.418337
| 2,694
| 105
| 79
| 25.657143
| 0.792597
| 0.464736
| 0
| 0
| 0
| 0
| 0.346667
| 0.048
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.025
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9e5c18f6a37dd4a96dd21f7ddefb31b197848dd
| 2,853
|
py
|
Python
|
multithreaded_webcrawler.py
|
the-muses-ltd/Multithreaded-Webcrawler-Cassandra-
|
eee68faf3c6ecb548edd0e96ce445dcd366fb735
|
[
"MIT"
] | null | null | null |
multithreaded_webcrawler.py
|
the-muses-ltd/Multithreaded-Webcrawler-Cassandra-
|
eee68faf3c6ecb548edd0e96ce445dcd366fb735
|
[
"MIT"
] | null | null | null |
multithreaded_webcrawler.py
|
the-muses-ltd/Multithreaded-Webcrawler-Cassandra-
|
eee68faf3c6ecb548edd0e96ce445dcd366fb735
|
[
"MIT"
] | null | null | null |
# This is a reusable webcrawler architecture that can be adapted to scrape any website.
# RESULTS:
# Roughly 24 seconds per thousand courses scraped for ThreadPoolExecutor vs 63s for unthreaded script.
# This is a very basic implementation of multithreading in order to show the proof of concept, but is a good base to build off of.
import requests
from bs4 import BeautifulSoup
import csv
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import time
import logging
from mitopencourseware_crawler_worker import mit_crawler
def courses_spider(max_pages):
data_to_csv = [] #holds all data to send to csv
print("Webcrawler workers have started, please wait while we finish crawling...")
# remove max pages loop (unnecessary)
page = 1
while page <= max_pages:
url = 'https://ocw.mit.edu/courses/'
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
# Multithread only the work:
# Tuning is required to find the most efficient amount of workers in the thread pool.
with ThreadPoolExecutor(max_workers=30) as executor:
start = time.time()
futures = [ executor.submit(work, link) for link in soup.findAll('h4', {'class': 'course_title'}, limit=100) ]
data_to_csv = []
for result in as_completed(futures):
data_to_csv.append(result.result())
end = time.time()
print("Time Taken to complete: {:.6f}s".format(end-start))
print("Courses extracted: ", len(data_to_csv))
page += 1
export_to_csv(data_to_csv)
def work(link):
# replace this function with the specific crawler you want to use:
return mit_crawler(link)
# Exports data to a formatted csv file, this will be replaced with multithreaded API calls to the Cassandra Prisma Database
# or, on the cloud in production, it will be sent to the S3 temporary database to be picked up by the AWS Lambda function which will push it to the Cassandra Database
def export_to_csv(csv_data):
with open('web_crawl_data.csv',mode='w') as csv_file:
field_names = ['Title','URL extension','External Website Logo','URL(href)','Description','Course logo URL']
csv_writer = csv.DictWriter(csv_file, fieldnames=field_names)#delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writeheader()
for course in csv_data:
course_data = {
'Title':course[0],
'URL extension':course[1],
'External Website Logo':course[2],
'URL(href)':course[3],
'Description':course[4],
'Course logo URL':course[5],
}
csv_writer.writerow(course_data)
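A small, hedged entry-point sketch (not part of the original file): it only calls courses_spider defined above and assumes the companion mitopencourseware_crawler_worker module and network access are available.
if __name__ == '__main__':
    # One pass over the course listing; results land in web_crawl_data.csv.
    courses_spider(1)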
| 42.58209
| 164
| 0.667368
| 383
| 2,853
| 4.859008
| 0.506527
| 0.021494
| 0.024181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009767
| 0.246407
| 2,853
| 66
| 165
| 43.227273
| 0.855814
| 0.317911
| 0
| 0.045455
| 0
| 0
| 0.1797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.159091
| 0.022727
| 0.25
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ea76a8227b5405cef7b2e6991bcba1911971f4
| 5,819
|
py
|
Python
|
wikisourcesort.py
|
ostropunk/wikisourcesort
|
3af2d086df0818a75b3e6c34550e2cc1382911a5
|
[
"MIT"
] | null | null | null |
wikisourcesort.py
|
ostropunk/wikisourcesort
|
3af2d086df0818a75b3e6c34550e2cc1382911a5
|
[
"MIT"
] | null | null | null |
wikisourcesort.py
|
ostropunk/wikisourcesort
|
3af2d086df0818a75b3e6c34550e2cc1382911a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import re
# In[2]:
def get_excel_dict(excelfile, key=None, index_col=0, header=0):
dataframe = pd.read_excel(excelfile, index_col=index_col, header=header)
dictionary = dataframe.to_dict()
if key is None:
return dictionary
else:
return dictionary[key]
# In[3]:
def textreader(text):
'''Opens textfile and returns the content as a string'''
with open(text, 'rt', encoding="utf8") as wiki:
txtstring = wiki.read()
return txtstring
# In[44]:
def replace_from_dict(text, dictionary):
'''Replaces words in text with new words in dictionary'''
for word in dictionary:
text = text.replace(word, dictionary[word])
return text
# In[172]:
def get_ref(text):
'''
Finds references between the <ref>- and </ref>-tags
and returns them as a list of strings
'''
ref = re.findall("\<ref.+?\<\/ref\>", text)
return ref
# In[171]:
def getrefurl(ref):
'''Finds the reference url in references and returns it as a string'''
url = re.search("http.+?(?=\s|\|title=|\|titel|\}\})", ref)
url = url.group()
return url
# In[30]:
def get_domain_name(url):
'''
Finds the domain name of the reference url and
returns that name as a string.
'''
domain_name = re.search('(?<=\/\/).+?(?=\/)', url)
domain_name = domain_name.group()
if domain_name.startswith('www.'):
domain_name = domain_name.replace('www.', '')
return domain_name
# In[32]:
def update_ref_dict(ref, ref_dict, ref_counts):
refurl = getrefurl(ref)
domain_name = get_domain_name(refurl)
if refurl not in ref_dict:
if domain_name not in ref_counts:
ref_counts.update({domain_name:1})
refname = domain_name + '.' + str(ref_counts[domain_name])
else:
ref_counts[domain_name] = ref_counts[domain_name] + 1
refname = domain_name + '.' + str(ref_counts[domain_name])
ref_dict.update({refurl:{'refs': [ref], 'refname': refname, 'refurl': refurl}})
else:
if ref not in ref_dict[refurl]['refs']:
ref_dict[refurl]['refs'].append(ref)
return ref_dict, ref_counts
# In[36]:
def create_ref_dict(refs):
'''
Takes a list of references, extracts the reference url and name,
and returns a dictionary sorted on the referenceurl as key.
'''
ref_dict = {}
ref_counts = {}
for ref in refs:
ref_dict, ref_counts = update_ref_dict(ref, ref_dict, ref_counts)
return ref_dict
# In[79]:
def get_ref_tag(text):
'''
Finds references between the <ref>- and </ref>-tags
and returns them as a list of strings
'''
ref = re.findall("\<ref name\=.+?\/\>", text)
#ref = re.findall("\<ref.+?\<\/ref\>|\<ref name\=.+?\/\>", text)
#ref = re.findall("\<ref.+?(?!\"\s\/\>)\<\/ref>", text)
#ref = re.findall("\<ref.+?\<\/ref\>", text)
return set(ref)
# In[130]:
def get_spec_ref(text, ref_tag):
'''
Finds references between the <ref>- and </ref>-tags
and returns them as a list of strings
'''
#ref = re.findall("\<ref name\=.+?\/\>", text)
#ref = re.findall("\<ref.+?\<\/ref\>|\<ref name\=.+?\/\>", text)
#ref = re.findall("\<ref.+?(?!\"\s\/\>)\<\/ref>", text)
ref = re.findall(f'\<ref name\=\"{ref_tag}\"\>.+?\<\/ref\>', text)
ref = ref[0]
return ref
# In[115]:
def get_ref_tag_name(ref_tag):
ref_tag_name = re.findall('\".+\"', ref_tag)
ref_tag_name = ref_tag_name[0].replace('"', '')
return ref_tag_name
# In[136]:
def replace_tags(text):
ref_tags = get_ref_tag(text)
for tag in ref_tags:
name = get_ref_tag_name(tag)
spec_ref = get_spec_ref(text, name)
text = text.replace(tag, spec_ref)
return text
# In[49]:
def replace_countries(text):
countries = get_excel_dict('countries2.xlsx', 'Länder')
text = replace_from_dict(text, countries)
return text
# In[66]:
def replace_headers(text):
headers = {'English title':'Engelsk titel',
'Original title':'Originaltitel',
'Director(s)':'Regissör(er)',
'Country':'Land',
'School':'Skola'}
text = replace_from_dict(text, headers)
return text
# In[169]:
def reference_sorter(text):
'''
Does a bunch of stuff that should be broken out in different functions.
'''
references = get_ref(text)
reference_dict = create_ref_dict(references)
reference_list = []
reference_text = '== Referenser ==\n<references>\n'
text = text.replace('== Källor ==', '== Referenser ==')
text = text.replace('<references/>', '')
for entry in reference_dict:
for reference in reference_dict[entry]['refs']:
text = text.replace(reference, '<ref name="{}" />'.format(reference_dict[entry]['refname']))
reference_list.append('<ref name="{}">{}</ref>'.format(reference_dict[entry]['refname'], entry))
for reference in reference_list:
reference_text += reference +'\n'
reference_text += '</references>'
text = re.split('== Referenser ==', text)
text = text[0] + reference_text + text[-1]
return text
# In[134]:
def fix_wiki_entry(textfile):
with open(textfile, 'r', encoding="utf8") as txt:
text = txt.read()
text = replace_tags(text)
text = reference_sorter(text)
text = replace_countries(text)
text = replace_headers(text)
with open('new_' + textfile, 'w', encoding='utf8') as new_text:
new_text.write(text)
return text
# In[173]:
def main():
fix_wiki_entry(input('Please enter input textfile:'))
if __name__ == "__main__":
main()
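A small illustrative check (not part of the original script) of the reference-parsing helpers above, using a made-up <ref> snippet and the reserved example.com domain; _example_reference_parsing is a hypothetical helper name.
def _example_reference_parsing():
    sample = "Text<ref>{{cite web |url=http://example.com/page |title=Example}}</ref> end."
    refs = get_ref(sample)
    assert refs == ["<ref>{{cite web |url=http://example.com/page |title=Example}}</ref>"]
    assert getrefurl(refs[0]) == "http://example.com/page"
    assert get_domain_name("http://www.example.com/page") == "example.com"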
| 23.75102
| 104
| 0.598385
| 766
| 5,819
| 4.374674
| 0.206266
| 0.056699
| 0.032229
| 0.03581
| 0.231274
| 0.177261
| 0.177261
| 0.168606
| 0.149508
| 0.149508
| 0
| 0.012192
| 0.238873
| 5,819
| 244
| 105
| 23.848361
| 0.744412
| 0.210174
| 0
| 0.116071
| 0
| 0
| 0.11455
| 0.015364
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151786
| false
| 0
| 0.017857
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ec253823566d98d214c4860b8c8d8ac8c80515
| 2,188
|
py
|
Python
|
python_utilities/plotting/util.py
|
sdaxen/python_utilities
|
7b9d6cc21bfc31be83629d2ac02b27e886ebc2bb
|
[
"MIT"
] | 2
|
2020-04-13T20:17:36.000Z
|
2020-05-12T01:13:12.000Z
|
python_utilities/plotting/util.py
|
sethaxen/python_utilities
|
7b9d6cc21bfc31be83629d2ac02b27e886ebc2bb
|
[
"MIT"
] | 5
|
2015-10-20T22:57:51.000Z
|
2017-09-07T01:10:23.000Z
|
python_utilities/plotting/util.py
|
sethaxen/python_utilities
|
7b9d6cc21bfc31be83629d2ac02b27e886ebc2bb
|
[
"MIT"
] | 3
|
2015-08-17T17:55:41.000Z
|
2018-09-19T13:56:42.000Z
|
"""Utility functions for plotting.
Author: Seth Axen
E-mail: [email protected]"""
from collections import deque
import numpy as np
def rgb_to_hsv(rgb):
"""Convert RGB colors to HSV colors."""
r, g, b = tuple(map(float, rgb))
if any([r > 1, g > 1, b > 1]):
r /= 255.
g /= 255.
b /= 255.
mmax = max(r, g, b)
mmin = min(r, g, b)
c = mmax - mmin
if (c == 0.):
hp = 0.
elif (mmax == r):
hp = ((g - b) / c) % 6
elif (mmax == g):
hp = ((b - r) / c) + 2
elif (mmax == b):
hp = ((r - g) / c) + 4
h = 60 * hp
v = mmax
if (c == 0):
s = 0
else:
s = c / v
return (h, s, v)
def hsv_to_rgb(hsv):
"""Convert HSV colors to RGB colors."""
h, s, v = tuple(map(float, hsv))
c = v * s
m = v - c
hp = h / 60.
x = c * (1. - abs((hp % 2) - 1.))
hp = int(hp)
rgb = deque((c + m, x + m, m))
if (hp % 2):
rgb.reverse()
rgb.rotate((hp - 3) // 2)  # integer shift; '/' would pass a float to deque.rotate on Python 3
else:
rgb.rotate(hp // 2)
return tuple(rgb)
def rgb_to_yuv(rgb):
"""Convert RGB colors to Y'UV colors, useful for comparison."""
rgbv = np.array(rgb).reshape(3, 1)
if np.any(rgbv > 1.):
rgbv = rgbv / 255.
yuv = np.dot(np.array([[ .299, .587, .114],
[-.14713, -.28886, .436],
[ .615, -.51499, -.10001]], dtype=np.double),
rgbv)
return list(yuv)
def yuv_to_rgb(yuv):
"""Convert Y'UV colors to RGB colors."""
yuvv = np.array(yuv).reshape(3, 1)
rgb = np.dot(np.array([[1., 0., 1.13983],
[1., -.39465, -.58060],
[1., 2.03211, 0.]], dtype=np.double),
yuvv)
return list(rgb)
def compute_yuv_dist(rgb1, rgb2):
"""Compute Euclidean Y'UV distance between RGB colors."""
yuv1 = rgb_to_yuv(rgb1)
yuv2 = rgb_to_yuv(rgb2)
return float(sum((np.array(yuv1) - np.array(yuv2))**2)**.5)
def lighten_rgb(rgb, p=0.):
"""Lighten RGB colors by percentage p of total."""
h, s, v = rgb_to_hsv(rgb)
hsv = (h, s, min(1, v + p))
return hsv_to_rgb(hsv)
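A few illustrative checks (not part of the original module) exercising the helpers above; the last one assumes the integer-division fix in hsv_to_rgb noted earlier, and _example_color_utils is a hypothetical helper name.
def _example_color_utils():
    # Pure red in 8-bit RGB maps to hue 0, full saturation, full value.
    assert rgb_to_hsv((255, 0, 0)) == (0.0, 1.0, 1.0)
    # Identical colours are at Y'UV distance zero; distinct ones are not.
    assert compute_yuv_dist((255, 0, 0), (255, 0, 0)) == 0.0
    assert compute_yuv_dist((255, 0, 0), (0, 255, 0)) > 0.0
    # Lightening mid grey by 20% of total raises V from 0.5 to 0.7.
    r, g, b = lighten_rgb((0.5, 0.5, 0.5), 0.2)
    assert r == g == b and abs(r - 0.7) < 1e-9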
| 24.863636
| 74
| 0.472121
| 336
| 2,188
| 3.017857
| 0.300595
| 0.053254
| 0.008876
| 0.021696
| 0.04142
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 0.349177
| 2,188
| 87
| 75
| 25.149425
| 0.633427
| 0.153565
| 0
| 0.03125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.03125
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ec2cc7a1a6ba6f4583fe5b1a6bc53ffc63f837
| 618
|
py
|
Python
|
tests/test_process.py
|
confluentinc/utils-core
|
6001b4c61f7d923d273a23dc5a1580e0fa277d2c
|
[
"MIT"
] | null | null | null |
tests/test_process.py
|
confluentinc/utils-core
|
6001b4c61f7d923d273a23dc5a1580e0fa277d2c
|
[
"MIT"
] | null | null | null |
tests/test_process.py
|
confluentinc/utils-core
|
6001b4c61f7d923d273a23dc5a1580e0fa277d2c
|
[
"MIT"
] | 1
|
2021-01-14T11:33:35.000Z
|
2021-01-14T11:33:35.000Z
|
import pytest
from utils.process import run, silent_run, RunError
from utils.fs import in_temp_dir
def test_run(capsys):
with in_temp_dir():
assert run('echo hello > hello.txt; echo world >> hello.txt', shell=True)
out = run('ls', return_output=True)
assert out == 'hello.txt\n'
out = run(['cat', 'hello.txt'], return_output=True)
assert out == 'hello\nworld\n'
with pytest.raises(RunError):
run('blah')
assert not run('blah', raises=False)
assert silent_run('ls -l')
out, _ = capsys.readouterr()
assert out == ''
| 24.72
| 81
| 0.600324
| 83
| 618
| 4.349398
| 0.433735
| 0.088643
| 0.049862
| 0.121884
| 0.166205
| 0.166205
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26699
| 618
| 24
| 82
| 25.75
| 0.796909
| 0
| 0
| 0
| 0
| 0
| 0.160194
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ed79fef6ca74a4e312f154a876ffa2123179f7
| 16,276
|
py
|
Python
|
slim/nets/inception_resnet_v2.py
|
PPTMiao/mtl-ssl
|
b61449c3f902414304657de6ec217077e441a6b9
|
[
"Apache-2.0"
] | 90
|
2019-06-12T06:11:39.000Z
|
2022-03-21T22:28:38.000Z
|
slim/nets/inception_resnet_v2.py
|
PPTMiao/mtl-ssl
|
b61449c3f902414304657de6ec217077e441a6b9
|
[
"Apache-2.0"
] | 3
|
2020-03-24T17:01:25.000Z
|
2021-02-02T22:00:11.000Z
|
slim/nets/inception_resnet_v2.py
|
PPTMiao/mtl-ssl
|
b61449c3f902414304657de6ec217077e441a6b9
|
[
"Apache-2.0"
] | 17
|
2019-06-15T08:49:46.000Z
|
2022-01-24T06:46:23.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception Resnet V2 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def inception_resnet_v2_base(inputs,
final_endpoint='Conv2d_7b_1x1',
output_stride=16,
align_feature_maps=False,
scope=None):
"""Inception model from http://arxiv.org/abs/1602.07261.
Constructs an Inception Resnet v2 network from inputs to the given final
endpoint. This method can construct the network up to the final inception
block Conv2d_7b_1x1.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
output_stride: A scalar that specifies the requested ratio of input to
output spatial resolution. Only supports 8 and 16.
align_feature_maps: When true, changes all the VALID paddings in the network
to SAME padding so that the feature maps are aligned.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or if the output_stride is not 8 or 16, or if the output_stride is 8 and
we request an end point after 'PreAuxLogits'.
"""
if output_stride != 8 and output_stride != 16:
raise ValueError('output_stride must be 8 or 16.')
padding = 'SAME' if align_feature_maps else 'VALID'
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding,
scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding=padding,
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_3a_3x3')
if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding=padding,
scope='Conv2d_3b_1x1')
if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding=padding,
scope='Conv2d_4a_3x3')
if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points
# 35 x 35 x 192
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_5a_3x3')
if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
scope='Conv2d_0b_1x1')
net = tf.concat(
[tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
if add_and_check_final('Mixed_5b', net): return net, end_points
# TODO(alemi): Register intermediate endpoints
net = slim.repeat(net, 10, block35, scale=0.17)
# 17 x 17 x 1088 if output_stride == 8,
# 33 x 33 x 1088 if output_stride == 16
use_atrous = output_stride == 8
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
if add_and_check_final('Mixed_6a', net): return net, end_points
# TODO(alemi): register intermediate endpoints
with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1):
net = slim.repeat(net, 20, block17, scale=0.10)
if add_and_check_final('PreAuxLogits', net): return net, end_points
if output_stride == 8:
# TODO(gpapan): Properly support output_stride for the rest of the net.
raise ValueError('output_stride==8 is only supported up to the '
'PreAuxlogits end_point for now.')
# 8 x 8 x 2080
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2,
padding=padding,
scope='MaxPool_1a_3x3')
net = tf.concat(
[tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
if add_and_check_final('Mixed_7a', net): return net, end_points
# TODO(alemi): register intermediate endpoints
net = slim.repeat(net, 9, block8, scale=0.20)
net = block8(net, activation_fn=None)
# 8 x 8 x 1536
net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points
raise ValueError('final_endpoint (%s) not recognized', final_endpoint)
def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionResnetV2',
create_aux_logits=True):
"""Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxiliary logits.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, num_classes],
reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_resnet_v2_base(inputs, scope=scope)
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
aux = end_points['PreAuxLogits']
aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a_5x5')
aux = slim.flatten(aux)
aux = slim.fully_connected(aux, num_classes, activation_fn=None,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_resnet_v2.default_image_size = 299
def inception_resnet_v2_arg_scope(weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
trainable=True):
"""Returns the scope with the default parameters for inception_resnet_v2.
Args:
weight_decay: the weight decay for weights variables.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
Returns:
an arg_scope with the parameters needed for inception_resnet_v2.
"""
# Set weight_decay for weights in conv2d and fully_connected layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay),
trainable=trainable):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'trainable': trainable
}
# Set activation_fn and parameters for batch_norm.
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope
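A minimal graph-construction sketch (not part of the original file), following the usual TF-Slim pattern with the two functions defined above; it assumes a TensorFlow 1.x / tf.contrib.slim environment, and _build_example_graph is a hypothetical helper name.
def _build_example_graph():
    size = inception_resnet_v2.default_image_size  # 299
    images = tf.placeholder(tf.float32, [None, size, size, 3])
    with slim.arg_scope(inception_resnet_v2_arg_scope()):
        logits, end_points = inception_resnet_v2(images, num_classes=1001,
                                                 is_training=False)
    return logits, end_points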
| 45.085873
| 80
| 0.616552
| 2,204
| 16,276
| 4.310345
| 0.158802
| 0.049474
| 0.041263
| 0.056
| 0.526737
| 0.469368
| 0.439368
| 0.371789
| 0.352
| 0.337474
| 0
| 0.07204
| 0.281887
| 16,276
| 360
| 81
| 45.211111
| 0.74076
| 0.232735
| 0
| 0.300448
| 0
| 0
| 0.107128
| 0
| 0
| 0
| 0
| 0.002778
| 0
| 1
| 0.03139
| false
| 0
| 0.017937
| 0
| 0.076233
| 0.004484
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9ee27c57dbf76a3c2165139cae647ead0e58c46
| 6,479
|
py
|
Python
|
tests/boilerplate_client/boilerplate_cmd.py
|
LedgerHQ/ledger-app-neo3
|
48e1e0dec3e4801fc3ab1b07c4fe4ed86735a642
|
[
"MIT"
] | null | null | null |
tests/boilerplate_client/boilerplate_cmd.py
|
LedgerHQ/ledger-app-neo3
|
48e1e0dec3e4801fc3ab1b07c4fe4ed86735a642
|
[
"MIT"
] | 5
|
2021-09-13T16:41:52.000Z
|
2022-01-12T16:00:21.000Z
|
tests/boilerplate_client/boilerplate_cmd.py
|
isabella232/app-neo3
|
c48ec5032143fe606d694372c2cfc02082b2ce03
|
[
"MIT"
] | 3
|
2021-09-01T11:40:09.000Z
|
2022-03-06T06:45:13.000Z
|
import struct
from typing import Tuple
from ledgercomm import Transport
from boilerplate_client.boilerplate_cmd_builder import BoilerplateCommandBuilder, InsType
from boilerplate_client.button import Button
from boilerplate_client.exception import DeviceException
from boilerplate_client.transaction import Transaction
from neo3.network import payloads
class BoilerplateCommand:
def __init__(self,
transport: Transport,
debug: bool = False) -> None:
self.transport = transport
self.builder = BoilerplateCommandBuilder(debug=debug)
self.debug = debug
def get_app_and_version(self) -> Tuple[str, str]:
sw, response = self.transport.exchange_raw(
self.builder.get_app_and_version()
) # type: int, bytes
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=0x01)
# response = format_id (1) ||
# app_name_len (1) ||
# app_name (var) ||
# version_len (1) ||
# version (var) ||
offset: int = 0
format_id: int = response[offset]
offset += 1
app_name_len: int = response[offset]
offset += 1
app_name: str = response[offset:offset + app_name_len].decode("ascii")
offset += app_name_len
version_len: int = response[offset]
offset += 1
version: str = response[offset:offset + version_len].decode("ascii")
offset += version_len
return app_name, version
def get_version(self) -> Tuple[int, int, int]:
sw, response = self.transport.exchange_raw(
self.builder.get_version()
) # type: int, bytes
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=InsType.INS_GET_VERSION)
# response = MAJOR (1) || MINOR (1) || PATCH (1)
assert len(response) == 3
major, minor, patch = struct.unpack(
"BBB",
response
) # type: int, int, int
return major, minor, patch
def get_app_name(self) -> str:
sw, response = self.transport.exchange_raw(
self.builder.get_app_name()
) # type: int, bytes
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=InsType.INS_GET_APP_NAME)
return response.decode("ascii")
def get_public_key(self, bip44_path: str, display: bool = False) -> bytes:
sw, response = self.transport.exchange_raw(
self.builder.get_public_key(bip44_path=bip44_path)
) # type: int, bytes
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=InsType.INS_GET_PUBLIC_KEY)
assert len(response) == 65 # 04 + 64 bytes of uncompressed key
return response
def sign_tx(self, bip44_path: str, transaction: payloads.Transaction, network_magic: int, button: Button) -> bytes:
sw: int
response: bytes = b""
for is_last, chunk in self.builder.sign_tx(bip44_path=bip44_path,
transaction=transaction,
network_magic=network_magic):
self.transport.send_raw(chunk)
if is_last:
# Review Transaction
button.right_click()
# Destination address
button.right_click()
button.right_click()
button.right_click()
# Token Amount
button.right_click()
# Target network
button.right_click()
# System fee
button.right_click()
# Network fee
button.right_click()
# Total fees
button.right_click()
# Valid until
button.right_click()
# Signer 1 of 1
button.right_click()
# Account 1/3, 2/3, 3/3
button.right_click()
button.right_click()
button.right_click()
# Scope
button.right_click()
# custom contracts
if (len(transaction.signers) > 0 and
payloads.WitnessScope.CUSTOM_CONTRACTS in transaction.signers[0].scope):
for _ in range(len(transaction.signers[0].allowed_contracts)):
button.right_click()
button.right_click()
button.right_click()
# Approve
button.both_click()
sw, response = self.transport.recv() # type: int, bytes
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=InsType.INS_SIGN_TX)
return response
def sign_vote_tx(self, bip44_path: str, transaction: Transaction, network_magic: int, button: Button) -> bytes:
sw: int
response: bytes = b""
for is_last, chunk in self.builder.sign_tx(bip44_path=bip44_path,
transaction=transaction,
network_magic=network_magic):
self.transport.send_raw(chunk)
if is_last:
# Review Transaction
button.right_click()
# Vote to public key
button.right_click()
button.right_click()
button.right_click()
button.right_click()
# Target network
button.right_click()
# System fee
button.right_click()
# Network fee
button.right_click()
# Total fees
button.right_click()
# Valid until
button.right_click()
# Signer 1 of 1
button.right_click()
# Account 1/3, 2/3, 3/3
button.right_click()
button.right_click()
button.right_click()
# Scope
button.right_click()
# Approve
button.both_click()
sw, response = self.transport.recv() # type: int, bytes
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=InsType.INS_SIGN_TX)
return response
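A hedged usage sketch, not part of the original test client: it assumes a device (or the Speculos emulator) reachable through ledgercomm, and the Transport constructor arguments shown here are assumptions about that API rather than something taken from this file.

from ledgercomm import Transport

transport = Transport(interface="hid", debug=True)  # assumed ledgercomm API
cmd = BoilerplateCommand(transport=transport, debug=True)
app_name, version = cmd.get_app_and_version()
major, minor, patch = cmd.get_version()
print(app_name, version, (major, minor, patch))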
| 34.462766
| 131
| 0.533416
| 659
| 6,479
| 5.048558
| 0.174507
| 0.109107
| 0.158702
| 0.072738
| 0.607454
| 0.607454
| 0.581004
| 0.562369
| 0.562369
| 0.495041
| 0
| 0.022061
| 0.384319
| 6,479
| 188
| 132
| 34.462766
| 0.811983
| 0.104491
| 0
| 0.565574
| 0
| 0
| 0.003123
| 0
| 0
| 0
| 0.00694
| 0
| 0.016393
| 1
| 0.057377
| false
| 0
| 0.065574
| 0
| 0.180328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9efa4ffda8cacd286187e29ce110d292c7a1e64
| 946
|
py
|
Python
|
clpy/sparse/util.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 142
|
2018-06-07T07:43:10.000Z
|
2021-10-30T21:06:32.000Z
|
clpy/sparse/util.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 282
|
2018-06-07T08:35:03.000Z
|
2021-03-31T03:14:32.000Z
|
clpy/sparse/util.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 19
|
2018-06-19T11:07:53.000Z
|
2021-05-13T20:57:04.000Z
|
import clpy
import clpy.sparse.base
_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
unsigned long long* address_as_ull =
(unsigned long long*)address;
unsigned long long old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
'''
def isintlike(x):
try:
return bool(int(x) == x)
except (TypeError, ValueError):
return False
def isscalarlike(x):
return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)
def isshape(x):
if not isinstance(x, tuple) or len(x) != 2:
return False
m, n = x
return isintlike(m) and isintlike(n)
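A few illustrative calls, added as a sketch rather than part of the original module, showing what the three predicates above accept:

assert isintlike(3.0) and not isintlike(3.5)        # integral floats count as int-like
assert isscalarlike(2.0)                            # plain Python scalars qualify
assert isshape((4, 5)) and not isshape((4, 5, 6))   # a shape is a 2-tuple of int-likes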
| 24.25641
| 76
| 0.60148
| 118
| 946
| 4.567797
| 0.449153
| 0.06679
| 0.089054
| 0.085343
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007496
| 0.294926
| 946
| 38
| 77
| 24.894737
| 0.8006
| 0
| 0
| 0.066667
| 0
| 0
| 0.547569
| 0.113108
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.066667
| 0.033333
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9efc68d74f0ff6411265258b8ee1094b0fa820e
| 1,316
|
py
|
Python
|
test/test_cartesian.py
|
hwazni/discopy
|
812a4c77de4c766591bad74306720b518cdc54fc
|
[
"BSD-3-Clause"
] | 205
|
2019-12-29T09:45:09.000Z
|
2022-03-24T09:29:13.000Z
|
test/test_cartesian.py
|
hwazni/discopy
|
812a4c77de4c766591bad74306720b518cdc54fc
|
[
"BSD-3-Clause"
] | 61
|
2019-12-11T10:46:38.000Z
|
2022-03-28T17:10:52.000Z
|
test/test_cartesian.py
|
hwazni/discopy
|
812a4c77de4c766591bad74306720b518cdc54fc
|
[
"BSD-3-Clause"
] | 46
|
2020-04-08T23:33:31.000Z
|
2022-03-18T21:58:35.000Z
|
from pytest import raises
from discopy.cartesian import *
def test_Box_repr():
f = Box('f', 1, 2, lambda x: (x, x))
assert "Box('f', 1, 2" in repr(f)
def test_Function_str():
f = Function(2, 1, lambda x, y: x + y)
assert 'Function(dom=2, cod=1,' in str(f)
def test_Function_call():
f = Swap(2, 1)
values = (2, 3)
with raises(TypeError) as err:
f(*values)
assert str(err.value) == messages.expected_input_length(f, values)
def test_Function_then():
f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
assert Function.id(2).then(*(f, g))(20, 21) == 42
def test_Function_then_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f >> g
assert str(err.value) == messages.type_err(Function, g)
g = Function.id(2)
with raises(AxiomError) as err:
f >> g
assert str(err.value) == messages.does_not_compose(f, g)
def test_Function_tensor():
assert Function.id(3)(1, 2, 3)\
== Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)
def test_Function_tensor_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f @ g
assert str(err.value) == messages.type_err(Function, g)
| 25.803922
| 78
| 0.595745
| 221
| 1,316
| 3.447964
| 0.217195
| 0.073491
| 0.11811
| 0.08399
| 0.423885
| 0.391076
| 0.358268
| 0.358268
| 0.332021
| 0.262467
| 0
| 0.039039
| 0.240881
| 1,316
| 50
| 79
| 26.32
| 0.723724
| 0
| 0
| 0.333333
| 0
| 0
| 0.027356
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.194444
| false
| 0
| 0.055556
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f0ba759404ab21f8b93c6f40fde8e030bbf8a1
| 12,904
|
py
|
Python
|
qiskit_metal/qlibrary/qubits/Transmon_Interdigitated.py
|
PatrickSJacobs/qiskit-metal
|
9628369c4b880d1e13199e559f898c5e0b96eecb
|
[
"Apache-2.0"
] | null | null | null |
qiskit_metal/qlibrary/qubits/Transmon_Interdigitated.py
|
PatrickSJacobs/qiskit-metal
|
9628369c4b880d1e13199e559f898c5e0b96eecb
|
[
"Apache-2.0"
] | null | null | null |
qiskit_metal/qlibrary/qubits/Transmon_Interdigitated.py
|
PatrickSJacobs/qiskit-metal
|
9628369c4b880d1e13199e559f898c5e0b96eecb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#from math import *
from math import sin, cos
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core.base import QComponent
import numpy as np
#from ... import config
#if not config.is_building_docs():
# from qiskit_metal import is_true
class TransmonInterdigitated(QComponent):
"""
The base "TransmonInterdigitated" class inherits from the "QComponent" class.
This creates a transmon pocket with two large pads connected by a Josephson
junction. Both pads have four interdigitated "fingers" which increase the
capacitance of the structure. There are three coupling capacitor pads with qpins
defined; these can be connected to other structures in a design using CPWs.
Default Options:
* pad_width: '1000um' -- width of the large rectangular pads on either side
of the junction
* pad_height: '300um' -- height of the large rectangular pads on either side
of the junction
* finger_width: '50um' -- width of the "finger" on either side of the junction
* finger_height: '100um' -- height of the "finger" on the side of the junction
* finger_space: '50um' -- height of the Josephson Junction (equivalently, the space
between the two fingers)
* pad_pos_x: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* pad_pos_y: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* comb_width: '50um' -- the width of the four interdigitated combs connected to
either pad
* comb_space_vert: '50um' -- the space between the edge of a comb and the edge of
the opposite rectangular pad
* comb_space_hor: '50um' -- the space between adjacent interdigitated comb structures
* jj_width: '20um' -- the width of the Josephson Junction located between the two
fingers of the device
* cc_space: '50um' -- the space between the lower rectangular pad and the coupling
capacitor below it
* cc_width: '100um' -- the width of the coupling capacitor located below the bottom
rectangular pad
* cc_height: '100um' -- the height of the coupling capacitor located below the bottom
rectangular pad
* cc_topleft_space: '50um' -- the space between the upper rectangular pad and the top
left coupling capacitor
* cc_topleft_width: '100um' -- the width of the top left coupling capacitor pad
* cc_topleft_height: '100um' -- the height of the top left coupling capacitor pad
* cc_topright_space: '50um' -- the space between the upper rectangular pad and the
top right coupling capacitor
* cc_topright_width: '100um' -- the width of the top right coupling capacitor pad
* cc_topright_height: '100um' -- the height of the top right coupling capacitor pad
* position_x: '0um' -- the x-coordinate defining the center of the transmon pocket
on the chip
* position_y: '0um' -- the y-coordinate defining the center of the transmon pocket
on the chip
* rotation: '0.0' -- the angle at which the entire structure is rotated
* rotation_top_pad: '180' -- internal coordinate defining the angle of rotation
between top and bottom pads
* layer: '1' -- all objects are drawn assuming they are part of the same layer on
the chip
"""
# Default drawing options
default_options = Dict(pad_width='1000um',
pad_height='300um',
finger_width='50um',
finger_height='100um',
finger_space='50um',
pad_pos_x='0um',
pad_pos_y='0um',
comb_width='50um',
comb_space_vert='50um',
comb_space_hor='50um',
jj_width='20um',
cc_space='50um',
cc_width='100um',
cc_height='100um',
cc_topleft_space='50um',
cc_topleft_width='100um',
cc_topleft_height='100um',
cc_topright_space='50um',
cc_topright_width='100um',
cc_topright_height='100um',
position_x='0um',
position_y='0um',
rotation='0.0',
rotation_top_pad='180',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# draw the lower pad as a rectangle
pad_lower = draw.rectangle(p.pad_width, p.pad_height, p.pad_pos_x,
p.pad_pos_y)
# draw the lower finger as a rectangle
finger_lower = draw.rectangle(
p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y +
0.49999 * (p.pad_height) + 0.49999 * (p.finger_height))
# draw the Josephson Junction
rect_jj = draw.rectangle(
p.jj_width, p.finger_space, p.pad_pos_x,
0.5 * (p.pad_height) + p.finger_height + 0.5 * (p.finger_space))
# draw the first comb to the right of the lower finger as a rectangle
comb1_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the right of the lower finger by translating the first comb
comb2_lower = draw.translate(comb1_lower,
2.0 * (p.comb_space_hor + p.comb_width),
0.0)
# draw the first comb to the left of the lower finger
comb3_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(-0.5 * p.finger_width - 2.0 * p.comb_space_hor -
1.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the left of the lower finger
comb4_lower = draw.translate(comb3_lower,
-2.0 * (p.comb_space_hor + p.comb_width),
0.0)
coupling_capacitor = draw.rectangle(
p.cc_width, p.cc_height, p.pad_pos_x,
p.pad_pos_y - 0.5 * (p.pad_height) - p.cc_space - 0.5 * p.cc_height)
cc_topleft = draw.rectangle(
p.cc_topleft_width, p.cc_topleft_height,
p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space + 0.5 * p.cc_topleft_height)
cc_topright = draw.translate(
cc_topleft,
p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width,
0.0)
# merge the bottom elements
bottom = draw.union(pad_lower, finger_lower, comb1_lower, comb2_lower,
comb3_lower, comb4_lower)
# create the top portion of the comb by translating and rotating
# the bottom portion of the comb
top = draw.translate(bottom, 0.0, p.pad_height + p.finger_space)
top = draw.rotate(top, p.rotation_top_pad)
# merge everything into a single design
design = draw.union(bottom, top, rect_jj, coupling_capacitor,
cc_topleft, cc_topright)
# draw the transmon pocket bounding box
pocket = draw.rectangle(1.5 * p.pad_width, 5.0 * p.pad_height)
# the origin is originally set to the middle of the lower pad.
# Let's move it to the center of the JJ.
design = draw.translate(
design, 0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# now translate the final structure according to the user input
design = draw.rotate(design, p.rotation, origin=(0, 0))
design = draw.translate(design, p.position_x, p.position_y)
pocket = draw.rotate(pocket, p.rotation, origin=(0, 0))
pocket = draw.translate(pocket, p.position_x, p.position_y)
geom = {'design': design}
geom_pocket = {'pocket': pocket}
self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
self.add_qgeometry('poly', geom_pocket, layer=p.layer, subtract=True)
###################################################################
# Add Qpin connections for coupling capacitors
# define a function that both rotates and translates the
# qpin coordinates
def qpin_rotate_translate(x):
""" This function rotates the coordinates of the three qpins
according to the user inputs for "position_x", "position_y"
and "rotation".
"""
y = list(x)
z = [0.0, 0.0]
z[0] = y[0] * cos(p.rotation * 3.14159 / 180) - y[1] * sin(
p.rotation * 3.14159 / 180)
z[1] = y[0] * sin(p.rotation * 3.14159 / 180) + y[1] * cos(
p.rotation * 3.14159 / 180)
z[0] = z[0] + p.position_x
z[1] = z[1] + p.position_y
x = (z[0], z[1])
return x
# Add Qpin connections for the bottom coupling capacitor
qp1a = (0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp1b = (0.0, -0.5 * p.pad_height - p.cc_space - p.cc_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# rotate and translate the qpin coordinates
qp1a = qpin_rotate_translate(qp1a)
qp1b = qpin_rotate_translate(qp1b)
self.add_pin('pin1',
points=np.array([qp1a, qp1b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top left coupling capacitor
qp2a = (p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp2b = (p.pad_pos_x - 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp2a = qpin_rotate_translate(qp2a)
qp2b = qpin_rotate_translate(qp2b)
self.add_pin('pin2',
points=np.array([qp2a, qp2b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top right coupling capacitor
qp3a = (p.pad_pos_x + 0.5 * p.pad_width - 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp3b = (p.pad_pos_x + 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp3a = qpin_rotate_translate(qp3a)
qp3b = qpin_rotate_translate(qp3b)
self.add_pin('pin3',
points=np.array([qp3a, qp3b]),
width=0.01,
input_as_norm=True)
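A short instantiation sketch, illustrative only; the design and rebuild calls are assumptions based on the usual qiskit-metal workflow rather than part of this file.

from qiskit_metal import designs

design = designs.DesignPlanar()            # planar design that holds the components
q1 = TransmonInterdigitated(design, 'Q1',
                            options=dict(position_x='2mm',
                                         position_y='0mm',
                                         rotation='90'))
design.rebuild()                           # regenerate QGeometry after option edits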
| 45.43662
| 93
| 0.577805
| 1,763
| 12,904
| 4.056154
| 0.152014
| 0.014543
| 0.018878
| 0.01762
| 0.420361
| 0.379108
| 0.346665
| 0.312823
| 0.284016
| 0.275766
| 0
| 0.043438
| 0.327418
| 12,904
| 283
| 94
| 45.597173
| 0.780505
| 0.376317
| 0
| 0.262411
| 0
| 0
| 0.018596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014184
| false
| 0
| 0.028369
| 0
| 0.070922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f2ed71da13f5b57b61c1c386731f8180c40992
| 667
|
py
|
Python
|
www/app.py
|
leeeGreat/xlw_study_python
|
03d8eb59f6826b4689d6598ede6393ecbb5058fb
|
[
"MIT"
] | 1
|
2018-03-12T12:29:21.000Z
|
2018-03-12T12:29:21.000Z
|
www/app.py
|
leeeGreat/xlw_study_python
|
03d8eb59f6826b4689d6598ede6393ecbb5058fb
|
[
"MIT"
] | null | null | null |
www/app.py
|
leeeGreat/xlw_study_python
|
03d8eb59f6826b4689d6598ede6393ecbb5058fb
|
[
"MIT"
] | 1
|
2018-04-13T13:26:50.000Z
|
2018-04-13T13:26:50.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao'
'''
async web application.
'''
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
def index(request):
return web.Response(body=b'<h1>Awesome</h1>')
async def init(loop):
app = web.Application(loop=loop)
app.router.add_route('GET', '/', index)
srv = await loop.create_server(app.make_handler(), '127.0.0.1', 9000)
logging.info('server started at http://127.0.0.1:9000...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
| 22.233333
| 73
| 0.698651
| 99
| 667
| 4.585859
| 0.59596
| 0.052863
| 0.022026
| 0.026432
| 0.044053
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041885
| 0.14093
| 667
| 29
| 74
| 23
| 0.750436
| 0.064468
| 0
| 0
| 0
| 0
| 0.140203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.25
| 0.0625
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f306cc03073671d285f885169a3fe6dd743eef
| 684
|
py
|
Python
|
examples/Testing/flopy3_plotdata.py
|
ritchie46/flopy
|
8e7284dcb3aaf5c12293d442248c2c2d9959f835
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 1
|
2021-03-17T09:15:54.000Z
|
2021-03-17T09:15:54.000Z
|
examples/Testing/flopy3_plotdata.py
|
ritchie46/flopy
|
8e7284dcb3aaf5c12293d442248c2c2d9959f835
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
examples/Testing/flopy3_plotdata.py
|
ritchie46/flopy
|
8e7284dcb3aaf5c12293d442248c2c2d9959f835
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 1
|
2021-08-05T19:11:27.000Z
|
2021-08-05T19:11:27.000Z
|
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import flopy
fb = flopy.modflow.Modflow.load('freyberg', version='mf2005', model_ws=os.path.join('..', 'data', 'freyberg'), verbose=True)
dis = fb.dis
top = fb.dis.top
fb.dis.top.plot(grid=True, colorbar=True)
fb.dis.botm.plot(grid=True, colorbar=True)
fb.dis.plot()
plt.show()
fb.dis.plot()
plt.show()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1,2,1, aspect='equal')
fb.dis.top.plot(grid=True, axes=ax, colorbar=True)
ax = fig.add_subplot(1,2,2, aspect='equal')
fb.dis.botm.plot(grid=True, axes=ax, colorbar=True)
plt.show()
print('this is the end my friend')
| 20.727273
| 124
| 0.71345
| 121
| 684
| 3.966942
| 0.421488
| 0.083333
| 0.066667
| 0.041667
| 0.466667
| 0.410417
| 0.245833
| 0
| 0
| 0
| 0
| 0.019704
| 0.109649
| 684
| 33
| 125
| 20.727273
| 0.768473
| 0
| 0
| 0.238095
| 0
| 0
| 0.091971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.238095
| 0
| 0.238095
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f53b3bd4af7f2d655423b3e5a97d903f5c6dac
| 2,025
|
py
|
Python
|
apps/pypi/tests/test_slurper.py
|
cartwheelweb/packaginator
|
f6ce11da22154bce9cba42e896989bdb0fd5e865
|
[
"MIT"
] | 1
|
2015-11-08T11:31:09.000Z
|
2015-11-08T11:31:09.000Z
|
apps/pypi/tests/test_slurper.py
|
cartwheelweb/packaginator
|
f6ce11da22154bce9cba42e896989bdb0fd5e865
|
[
"MIT"
] | null | null | null |
apps/pypi/tests/test_slurper.py
|
cartwheelweb/packaginator
|
f6ce11da22154bce9cba42e896989bdb0fd5e865
|
[
"MIT"
] | null | null | null |
from django.template.defaultfilters import slugify
from django.test import TestCase
from package.models import Package, Version
from pypi.slurper import Slurper
TEST_PACKAGE_NAME = 'Django'
TEST_PACKAGE_VERSION = '1.3'
TEST_PACKAGE_REPO_NAME = 'django-uni-form'
class SlurpAllTests(TestCase):
def test_get_latest_version_number(self):
slurper = Slurper(TEST_PACKAGE_NAME)
version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
self.assertEquals(version, TEST_PACKAGE_VERSION)
def test_get_or_create_package(self):
slurper = Slurper(TEST_PACKAGE_NAME)
version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
package, created = slurper.get_or_create_package(TEST_PACKAGE_NAME, version)
self.assertTrue(created)
self.assertTrue(isinstance(package, Package))
self.assertEquals(package.title, TEST_PACKAGE_NAME)
self.assertEquals(package.slug, slugify(TEST_PACKAGE_NAME))
def test_get_or_create_with_repo(self):
slurper = Slurper(TEST_PACKAGE_REPO_NAME)
version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)
package, created = slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)
self.assertTrue(created)
self.assertTrue(isinstance(package, Package))
self.assertEquals(package.title, TEST_PACKAGE_REPO_NAME)
self.assertEquals(package.slug, slugify(TEST_PACKAGE_REPO_NAME))
def test_check_versions(self):
slurper = Slurper(TEST_PACKAGE_REPO_NAME)
version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)
# make me a package (Actually, make me a billionare)
slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)
# fetch the package for testing
package = Package.objects.get(title=TEST_PACKAGE_REPO_NAME)
self.assertTrue(package.pypi_downloads > 1000)
| 39.705882
| 89
| 0.718025
| 244
| 2,025
| 5.602459
| 0.20082
| 0.160936
| 0.109729
| 0.13899
| 0.663497
| 0.625457
| 0.602048
| 0.602048
| 0.530358
| 0.530358
| 0
| 0.003766
| 0.213333
| 2,025
| 51
| 90
| 39.705882
| 0.854363
| 0.039506
| 0
| 0.352941
| 0
| 0
| 0.012352
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.264706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f57949a15383ed2a070813678af904fe2e2df0
| 1,145
|
py
|
Python
|
azure-mgmt-logic/azure/mgmt/logic/models/recurrence_schedule_occurrence.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | 1
|
2017-10-29T15:14:35.000Z
|
2017-10-29T15:14:35.000Z
|
azure-mgmt-logic/azure/mgmt/logic/models/recurrence_schedule_occurrence.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | null | null | null |
azure-mgmt-logic/azure/mgmt/logic/models/recurrence_schedule_occurrence.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RecurrenceScheduleOccurrence(Model):
"""RecurrenceScheduleOccurrence.
:param day: The day of the week. Possible values include: 'Sunday',
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'
:type day: str or :class:`DayOfWeek <azure.mgmt.logic.models.DayOfWeek>`
:param occurrence: The occurrence.
:type occurrence: int
"""
_attribute_map = {
'day': {'key': 'day', 'type': 'DayOfWeek'},
'occurrence': {'key': 'occurrence', 'type': 'int'},
}
def __init__(self, day=None, occurrence=None):
self.day = day
self.occurrence = occurrence
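For illustration only (not part of the generated file), the model is used as a plain value object:

occurrence = RecurrenceScheduleOccurrence(day='Monday', occurrence=2)
# e.g. "the second Monday" slot of a monthly recurrence schedule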
| 34.69697
| 76
| 0.590393
| 116
| 1,145
| 5.775862
| 0.672414
| 0.041791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001056
| 0.172926
| 1,145
| 32
| 77
| 35.78125
| 0.706441
| 0.655022
| 0
| 0
| 0
| 0
| 0.146479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f6bdae288edaa527af57b654eafa00cfa5047b
| 11,757
|
py
|
Python
|
pandas/core/apply.py
|
AakankshaAshok/pandas
|
6498bc1e8a12003640139db4794bd5cd2462c116
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/apply.py
|
AakankshaAshok/pandas
|
6498bc1e8a12003640139db4794bd5cd2462c116
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/apply.py
|
AakankshaAshok/pandas
|
6498bc1e8a12003640139db4794bd5cd2462c116
|
[
"BSD-3-Clause"
] | null | null | null |
import inspect
import numpy as np
from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries
def frame_apply(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwds["axis"] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._data.apply("apply", func=self.f)
return self.obj._constructor(
data=results, index=self.index, columns=self.columns, copy=False
)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(Series([]))
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.f(Series([]))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_array_dtype).any()
# Disallow complex_internals since libreduction shortcut
# cannot handle MultiIndex
and not self.agg_axis._has_complex_internals
):
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
# Preserve subclass for e.g. test_subclassed_apply
dummy = self.obj._constructor_sliced(
empty_arr, index=index, dtype=values.dtype
)
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
except TypeError:
# e.g. test_apply_ignore_failures we just ignore
if not self.ignore_failures:
raise
except ZeroDivisionError:
# reached via numexpr; fall back to python implementation
pass
else:
return self.obj._constructor_sliced(result, index=labels)
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
except Exception:
pass
else:
keys.append(v.name)
successes.append(i)
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super().apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(self.res_index):
result.columns = self.res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super().apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (
constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values, self.index))
)
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
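A hedged illustration, not part of pandas itself: frame_apply is the engine behind DataFrame.apply, so the two calls below are expected to produce the same Series (the second one goes through the internal API, which may change between versions).

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

via_apply = df.apply(np.sum, axis=0)        # public entry point

op = frame_apply(df, func=np.sum, axis=0)   # what DataFrame.apply builds internally
via_engine = op.get_result()

assert via_apply.equals(via_engine)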
| 28.745721
| 88
| 0.575572
| 1,409
| 11,757
| 4.676366
| 0.176721
| 0.030809
| 0.027622
| 0.01548
| 0.222492
| 0.148733
| 0.108059
| 0.078009
| 0.06435
| 0.048262
| 0
| 0.003227
| 0.340988
| 11,757
| 408
| 89
| 28.816176
| 0.847186
| 0.150549
| 0
| 0.328244
| 0
| 0
| 0.025836
| 0
| 0
| 0
| 0
| 0.002451
| 0
| 1
| 0.099237
| false
| 0.01145
| 0.030534
| 0.045802
| 0.28626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f7ffc0611459c276e6f9ae99c70b7e8ba1a1c3
| 707
|
py
|
Python
|
tests/test_model/test_recognizer/test_shufflenetv1.py
|
YinAoXiong/ZCls
|
8aeea3640f8456937db35d043e37cf2c03ac9017
|
[
"Apache-2.0"
] | null | null | null |
tests/test_model/test_recognizer/test_shufflenetv1.py
|
YinAoXiong/ZCls
|
8aeea3640f8456937db35d043e37cf2c03ac9017
|
[
"Apache-2.0"
] | null | null | null |
tests/test_model/test_recognizer/test_shufflenetv1.py
|
YinAoXiong/ZCls
|
8aeea3640f8456937db35d043e37cf2c03ac9017
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@date: 2021/5/16 10:22 PM
@file: test_shufflenetv1.py
@author: zj
@description:
"""
import torch
from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from zcls.model.recognizers.build import build_recognizer
def test_data(model):
data = torch.randn(1, 3, 224, 224)
outputs = model(data)[KEY_OUTPUT]
print(outputs.shape)
assert outputs.shape == (1, 1000)
def test_shufflenet():
cfg.merge_from_file('configs/benchmarks/shufflenet/shufflenet_v1_3g2x_zcls_imagenet_224.yaml')
print(cfg)
model = build_recognizer(cfg, torch.device('cpu'))
print(model)
test_data(model)
if __name__ == '__main__':
test_shufflenet()
| 19.638889
| 98
| 0.711457
| 99
| 707
| 4.828283
| 0.535354
| 0.050209
| 0.058577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053872
| 0.15983
| 707
| 35
| 99
| 20.2
| 0.750842
| 0.144272
| 0
| 0
| 0
| 0
| 0.137353
| 0.118928
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.117647
| false
| 0
| 0.235294
| 0
| 0.352941
| 0.176471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9f92ab910680bac296e7b003e06e2747df83ea4
| 882
|
py
|
Python
|
day06/part1.py
|
bugra-yilmaz/adventofcode2021
|
136cb1d4fba42af4eea934a73714c93710c8741e
|
[
"MIT"
] | null | null | null |
day06/part1.py
|
bugra-yilmaz/adventofcode2021
|
136cb1d4fba42af4eea934a73714c93710c8741e
|
[
"MIT"
] | null | null | null |
day06/part1.py
|
bugra-yilmaz/adventofcode2021
|
136cb1d4fba42af4eea934a73714c93710c8741e
|
[
"MIT"
] | null | null | null |
import os.path
from collections import Counter
import pytest
INPUT_TXT = os.path.join(os.path.dirname(__file__), 'input.txt')
def compute(s: str) -> int:
lines = s.splitlines()
numbers = Counter(int(f) for f in lines[0].split(","))
for d in range(80):
numbers2 = Counter({8: numbers[0], 6: numbers[0]})
for k, v in numbers.items():
if k >= 1:
numbers2[k - 1] += v
numbers = numbers2
return sum(numbers.values())
INPUT_S = '''\
3,4,3,1,2
'''
EXPECTED = 5934
@pytest.mark.parametrize(
('input_s', 'expected'),
(
(INPUT_S, EXPECTED),
),
)
def test(input_s: str, expected: int) -> None:
assert compute(input_s) == expected
def main() -> int:
with open(INPUT_TXT, "r") as f:
print(compute(f.read()))
return 0
if __name__ == '__main__':
raise SystemExit(main())
| 18.765957
| 64
| 0.580499
| 123
| 882
| 4.00813
| 0.479675
| 0.060852
| 0.085193
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033588
| 0.25737
| 882
| 46
| 65
| 19.173913
| 0.719084
| 0
| 0
| 0
| 0
| 0
| 0.052154
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.09375
| false
| 0
| 0.09375
| 0
| 0.25
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9fb745b63e853aa5e221b1f87db67c0723efc2d
| 394
|
py
|
Python
|
zmq_srv.py
|
iyedb/boost_asio_zeromq
|
63110c18540c8303ac29d574f25cba234a00a22d
|
[
"MIT"
] | 4
|
2015-04-07T06:00:34.000Z
|
2019-09-10T01:45:41.000Z
|
zmq_srv.py
|
iyedb/boost_asio_zeromq
|
63110c18540c8303ac29d574f25cba234a00a22d
|
[
"MIT"
] | null | null | null |
zmq_srv.py
|
iyedb/boost_asio_zeromq
|
63110c18540c8303ac29d574f25cba234a00a22d
|
[
"MIT"
] | 3
|
2015-06-30T07:37:41.000Z
|
2019-09-10T01:45:47.000Z
|
from __future__ import print_function
import zmq
import time
ADDR='tcp://127.0.0.1:11155'
ctx = zmq.Context()
srv = ctx.socket(zmq.REP)
srv.bind(ADDR)
#srv.setsockopt(zmq.RCVTIMEO, 3000);
while True:
try:
msg = srv.recv()
except Exception as e:
print('zmq socket recv timed out:', e)
else:
print('client says: %s' % msg)
srv.send(b'hi from server')
time.sleep(2)
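For illustration, a matching REQ client (a hypothetical companion script, not part of the original file) that talks to this server:

import zmq

ctx = zmq.Context()
req = ctx.socket(zmq.REQ)
req.connect('tcp://127.0.0.1:11155')
req.send(b'hello from client')   # REQ/REP sockets require strict send/recv alternation
reply = req.recv()
print('server replied:', reply)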
| 17.130435
| 41
| 0.659898
| 62
| 394
| 4.112903
| 0.677419
| 0.047059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050314
| 0.192893
| 394
| 22
| 42
| 17.909091
| 0.751572
| 0.088832
| 0
| 0
| 0
| 0
| 0.209497
| 0.058659
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9fdf7b2da8d5e9203d4272f61f62e3af6000e66
| 10,408
|
py
|
Python
|
mypy/server/aststrip.py
|
mmaryada27/mypy
|
39103273d705fe45a55c4879779a0d5567f01876
|
[
"PSF-2.0"
] | null | null | null |
mypy/server/aststrip.py
|
mmaryada27/mypy
|
39103273d705fe45a55c4879779a0d5567f01876
|
[
"PSF-2.0"
] | null | null | null |
mypy/server/aststrip.py
|
mmaryada27/mypy
|
39103273d705fe45a55c4879779a0d5567f01876
|
[
"PSF-2.0"
] | null | null | null |
"""Strip/reset AST in-place to match state after semantic analysis pass 1.
Fine-grained incremental mode reruns semantic analysis (passes 2 and 3)
and type checking for *existing* AST nodes (targets) when changes are
propagated using fine-grained dependencies. AST node attributes are
often changed during semantic analysis passes 2 and 3, and running
semantic analysis again on those nodes would produce incorrect
results, since these passes aren't idempotent. This pass resets AST
nodes to reflect the state after semantic analysis pass 1, so that we
can rerun semantic analysis.
(The above is in contrast to behavior with modules that have source code
changes, for which we reparse the entire module and reconstruct a fresh
AST. No stripping is required in this case. Both modes of operation should
have the same outcome.)
Notes:
* This is currently pretty fragile, as we must carefully undo whatever
changes can be made in semantic analysis passes 2 and 3, including changes
to symbol tables.
* We reuse existing AST nodes because it makes it relatively straightforward
to reprocess only a single target within a module efficiently. If there
was a way to parse a single target within a file, in time proportional to
the size of the target, we'd rather create fresh AST nodes than strip them.
Alas, no such facility exists and building it is non-trivial.
* Currently we don't actually reset all changes, but only those known to affect
non-idempotent semantic analysis behavior.
TODO: It would be more principled and less fragile to reset everything
changed in semantic analysis pass 2 and later.
* Reprocessing may recreate AST nodes (such as Var nodes, and TypeInfo nodes
created with assignment statements) that will get different identities from
the original AST. Thus running an AST merge is necessary after stripping,
even though some identities are preserved.
"""
import contextlib
from typing import Union, Iterator, Optional
from mypy.nodes import (
Node, FuncDef, NameExpr, MemberExpr, RefExpr, MypyFile, FuncItem, ClassDef, AssignmentStmt,
ImportFrom, Import, TypeInfo, SymbolTable, Var, CallExpr, Decorator, OverloadedFuncDef,
SuperExpr, UNBOUND_IMPORTED, GDEF, MDEF, IndexExpr
)
from mypy.traverser import TraverserVisitor
def strip_target(node: Union[MypyFile, FuncItem, OverloadedFuncDef]) -> None:
"""Reset a fine-grained incremental target to state after semantic analysis pass 1.
NOTE: Currently we opportunistically only reset changes that are known to otherwise
cause trouble.
"""
visitor = NodeStripVisitor()
if isinstance(node, MypyFile):
visitor.strip_file_top_level(node)
else:
node.accept(visitor)
class NodeStripVisitor(TraverserVisitor):
def __init__(self) -> None:
self.type = None # type: Optional[TypeInfo]
self.names = None # type: Optional[SymbolTable]
self.is_class_body = False
# By default, process function definitions. If False, don't -- this is used for
# processing module top levels.
self.recurse_into_functions = True
def strip_file_top_level(self, file_node: MypyFile) -> None:
"""Strip a module top-level (don't recurse into functions)."""
self.names = file_node.names
self.recurse_into_functions = False
file_node.accept(self)
def visit_class_def(self, node: ClassDef) -> None:
"""Strip class body and type info, but don't strip methods."""
node.info.type_vars = []
node.info.bases = []
node.info.abstract_attributes = []
node.info.mro = []
node.info.add_type_vars()
node.info.tuple_type = None
node.info.typeddict_type = None
node.info._cache = set()
node.info._cache_proper = set()
node.base_type_exprs.extend(node.removed_base_type_exprs)
node.removed_base_type_exprs = []
with self.enter_class(node.info):
super().visit_class_def(node)
def visit_func_def(self, node: FuncDef) -> None:
if not self.recurse_into_functions:
return
node.expanded = []
node.type = node.unanalyzed_type
with self.enter_method(node.info) if node.info else nothing():
super().visit_func_def(node)
def visit_decorator(self, node: Decorator) -> None:
node.var.type = None
for expr in node.decorators:
expr.accept(self)
if self.recurse_into_functions:
node.func.accept(self)
def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None:
if not self.recurse_into_functions:
return
if node.impl:
# Revert change made during semantic analysis pass 2.
assert node.items[-1] is not node.impl
node.items.append(node.impl)
super().visit_overloaded_func_def(node)
@contextlib.contextmanager
def enter_class(self, info: TypeInfo) -> Iterator[None]:
# TODO: Update and restore self.names
old_type = self.type
old_is_class_body = self.is_class_body
self.type = info
self.is_class_body = True
yield
self.type = old_type
self.is_class_body = old_is_class_body
@contextlib.contextmanager
def enter_method(self, info: TypeInfo) -> Iterator[None]:
# TODO: Update and restore self.names
old_type = self.type
old_is_class_body = self.is_class_body
self.type = info
self.is_class_body = False
yield
self.type = old_type
self.is_class_body = old_is_class_body
def visit_assignment_stmt(self, node: AssignmentStmt) -> None:
node.type = node.unanalyzed_type
if self.type and not self.is_class_body:
# TODO: Handle multiple assignment
if len(node.lvalues) == 1:
lvalue = node.lvalues[0]
if isinstance(lvalue, MemberExpr) and lvalue.is_new_def:
# Remove defined attribute from the class symbol table. If is_new_def is
# true for a MemberExpr, we know that it must be an assignment through
# self, since only those can define new attributes.
del self.type.names[lvalue.name]
super().visit_assignment_stmt(node)
def visit_import_from(self, node: ImportFrom) -> None:
if node.assignments:
node.assignments = []
else:
if self.names:
# Reset entries in the symbol table. This is necessary since
# otherwise the semantic analyzer will think that the import
# assigns to an existing name instead of defining a new one.
for name, as_name in node.names:
imported_name = as_name or name
symnode = self.names[imported_name]
symnode.kind = UNBOUND_IMPORTED
symnode.node = None
def visit_import(self, node: Import) -> None:
if node.assignments:
node.assignments = []
else:
if self.names:
# Reset entries in the symbol table. This is necessary since
# otherwise the semantic analyzer will think that the import
# assigns to an existing name instead of defining a new one.
for name, as_name in node.ids:
imported_name = as_name or name
initial = imported_name.split('.')[0]
symnode = self.names[initial]
symnode.kind = UNBOUND_IMPORTED
symnode.node = None
def visit_name_expr(self, node: NameExpr) -> None:
# Global assignments are processed in semantic analysis pass 1, and we
# only want to strip changes made in passes 2 or later.
if not (node.kind == GDEF and node.is_new_def):
# Remove defined attributes so that they can recreated during semantic analysis.
if node.kind == MDEF and node.is_new_def:
self.strip_class_attr(node.name)
self.strip_ref_expr(node)
def visit_member_expr(self, node: MemberExpr) -> None:
self.strip_ref_expr(node)
# These need to cleared for member expressions but not for other RefExprs since
# these can change based on changed in a base class.
node.is_new_def = False
node.is_inferred_def = False
if self.is_duplicate_attribute_def(node):
# This is marked as an instance variable definition but a base class
# defines an attribute with the same name, and we can't have
# multiple definitions for an attribute. Defer to the base class
# definition.
self.strip_class_attr(node.name)
node.def_var = None
super().visit_member_expr(node)
def visit_index_expr(self, node: IndexExpr) -> None:
node.analyzed = None # was a type alias
super().visit_index_expr(node)
def strip_class_attr(self, name: str) -> None:
if self.type is not None:
del self.type.names[name]
def is_duplicate_attribute_def(self, node: MemberExpr) -> bool:
if not node.is_inferred_def:
return False
assert self.type is not None, "Internal error: Member defined outside class"
if node.name not in self.type.names:
return False
return any(info.get(node.name) is not None for info in self.type.mro[1:])
def strip_ref_expr(self, node: RefExpr) -> None:
node.kind = None
node.node = None
node.fullname = None
node.is_new_def = False
node.is_inferred_def = False
def visit_call_expr(self, node: CallExpr) -> None:
node.analyzed = None
super().visit_call_expr(node)
def visit_super_expr(self, node: SuperExpr) -> None:
node.info = None
super().visit_super_expr(node)
# TODO: handle more node types
def is_self_member_ref(memberexpr: MemberExpr) -> bool:
"""Does memberexpr refer to an attribute of self?"""
# TODO: Merge with is_self_member_ref in semanal.py.
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
@contextlib.contextmanager
def nothing() -> Iterator[None]:
yield
| 41.13834
| 95
| 0.662952
| 1,391
| 10,408
| 4.83537
| 0.24371
| 0.016652
| 0.019625
| 0.017841
| 0.241005
| 0.195956
| 0.163247
| 0.154326
| 0.14273
| 0.128159
| 0
| 0.002363
| 0.267967
| 10,408
| 252
| 96
| 41.301587
| 0.880431
| 0.359531
| 0
| 0.344371
| 0
| 0
| 0.00682
| 0
| 0
| 0
| 0
| 0.007937
| 0.013245
| 1
| 0.145695
| false
| 0
| 0.092715
| 0
| 0.291391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d9fe6882b9e62ad1b9764fdded272caab1b5cf79
| 9,991
|
py
|
Python
|
lib/spack/spack/multimethod.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-09-10T22:50:08.000Z
|
2021-01-12T22:18:54.000Z
|
lib/spack/spack/multimethod.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17
|
2019-03-21T15:54:00.000Z
|
2022-03-29T19:34:28.000Z
|
lib/spack/spack/multimethod.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-04-06T09:04:11.000Z
|
2020-01-24T12:52:12.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This module contains utilities for using multi-methods in
spack. You can think of multi-methods like overloaded methods --
they're methods with the same name, and we need to select a version
of the method based on some criteria. e.g., for overloaded
methods, you would select a version of the method to call based on
the types of its arguments.
In spack, multi-methods are used to ease the life of package
authors. They allow methods like install() (or other methods
called by install()) to declare multiple versions to be called when
the package is instantiated with different specs. e.g., if the
package is built with OpenMPI on x86_64, you might want to call a
different install method than if it was built for mpich2 on
BlueGene/Q. Likewise, you might want to do a different type of
install for different versions of the package.
Multi-methods provide a simple decorator-based syntax for this that
avoids overly complicated rat nests of if statements. Obviously,
depending on the scenario, regular old conditionals might be clearer,
so package authors should use their judgement.
"""
import functools
import inspect
from llnl.util.lang import caller_locals
import spack.architecture
import spack.error
from spack.spec import Spec
class MultiMethodMeta(type):
"""This allows us to track the class's dict during instantiation."""
#: saved dictionary of attrs on the class being constructed
_locals = None
@classmethod
def __prepare__(cls, name, bases, **kwargs):
"""Save the dictionary that will be used for the class namespace."""
MultiMethodMeta._locals = dict()
return MultiMethodMeta._locals
def __init__(cls, name, bases, attr_dict):
"""Clear out the cached locals dict once the class is built."""
MultiMethodMeta._locals = None
super(MultiMethodMeta, cls).__init__(name, bases, attr_dict)
class SpecMultiMethod(object):
"""This implements a multi-method for Spack specs. Packages are
instantiated with a particular spec, and you may want to
execute different versions of methods based on what the spec
looks like. For example, you might want to call a different
version of install() for one platform than you call on another.
The SpecMultiMethod class implements a callable object that
handles method dispatch. When it is called, it looks through
registered methods and their associated specs, and it tries
to find one that matches the package's spec. If it finds one
(and only one), it will call that method.
This is intended for use with decorators (see below). The
decorator (see docs below) creates SpecMultiMethods and
registers method versions with them.
To register a method, you can do something like this:
mm = SpecMultiMethod()
mm.register("^chaos_5_x86_64_ib", some_method)
The object registered needs to be a Spec or some string that
will parse to be a valid spec.
When the mm is actually called, it selects a version of the
method to call based on the sys_type of the object it is
called on.
See the docs for decorators below for more details.
"""
def __init__(self, default=None):
self.method_list = []
self.default = default
if default:
functools.update_wrapper(self, default)
def register(self, spec, method):
"""Register a version of a method for a particular spec."""
self.method_list.append((spec, method))
if not hasattr(self, '__name__'):
functools.update_wrapper(self, method)
else:
assert(self.__name__ == method.__name__)
def __get__(self, obj, objtype):
"""This makes __call__ support instance methods."""
# Method_list is a list of tuples (constraint, method)
# Here we are going to assume that we have at least one
# element in the list. The first registered function
# will be the one 'wrapped'.
wrapped_method = self.method_list[0][1]
# Call functools.wraps manually to get all the attributes
# we need to be disguised as the wrapped_method
func = functools.wraps(wrapped_method)(
functools.partial(self.__call__, obj)
)
return func
def _get_method_by_spec(self, spec):
"""Find the method of this SpecMultiMethod object that satisfies the
given spec, if one exists
"""
for condition, method in self.method_list:
if spec.satisfies(condition):
return method
return self.default or None
def __call__(self, package_self, *args, **kwargs):
"""Find the first method with a spec that matches the
package's spec. If none is found, call the default
or if there is none, then raise a NoSuchMethodError.
"""
spec_method = self._get_method_by_spec(package_self.spec)
if spec_method:
return spec_method(package_self, *args, **kwargs)
        # Unwrap the MRO of `package_self` by hand. Note that we can't
# use `super()` here, because using `super()` recursively
# requires us to know the class of `package_self`, as well as
# its superclasses for successive calls. We don't have that
# information within `SpecMultiMethod`, because it is not
# associated with the package class.
for cls in inspect.getmro(package_self.__class__)[1:]:
superself = cls.__dict__.get(self.__name__, None)
if isinstance(superself, SpecMultiMethod):
# Check parent multimethod for method for spec.
superself_method = superself._get_method_by_spec(
package_self.spec
)
if superself_method:
return superself_method(package_self, *args, **kwargs)
elif superself:
return superself(package_self, *args, **kwargs)
raise NoSuchMethodError(
type(package_self), self.__name__, package_self.spec,
[m[0] for m in self.method_list]
)
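# --- Hedged illustration (an addition, not part of the original module) ---
# A minimal sketch of SpecMultiMethod's registration/dispatch cycle. The
# _FakeSpec/_FakePackage names below are hypothetical stand-ins; real packages
# use spack.spec.Spec objects and the @when decorator defined further down.
def _dispatch_sketch():
    class _FakeSpec(object):
        """Stand-in for Spec: satisfies() is plain string equality here."""
        def __init__(self, token):
            self.token = token
        def satisfies(self, condition):
            return self.token == condition
    class _FakePackage(object):
        spec = _FakeSpec('^openmpi')
    def install_openmpi(pkg):
        return 'built against OpenMPI'
    mm = SpecMultiMethod()
    mm.register('^openmpi', install_openmpi)
    # Dispatch picks the first registered method whose condition the
    # package's spec satisfies; otherwise the default (or the MRO) is tried.
    return mm(_FakePackage())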
class when(object):
"""This annotation lets packages declare multiple versions of
methods like install() that depend on the package's spec.
For example:
.. code-block:: python
class SomePackage(Package):
...
def install(self, prefix):
# Do default install
@when('target=x86_64:')
def install(self, prefix):
# This will be executed instead of the default install if
# the package's target is in the x86_64 family.
@when('target=ppc64:')
def install(self, prefix):
# This will be executed if the package's target is in
# the ppc64 family
This allows each package to have a default version of install() AND
specialized versions for particular platforms. The version that is
    called depends on the architecture of the instantiated package.
Note that this works for methods other than install, as well. So,
if you only have part of the install that is platform specific, you
could do this:
.. code-block:: python
class SomePackage(Package):
...
# virtual dependence on MPI.
# could resolve to mpich, mpich2, OpenMPI
depends_on('mpi')
def setup(self):
# do nothing in the default case
pass
@when('^openmpi')
def setup(self):
# do something special when this is built with OpenMPI for
# its MPI implementations.
def install(self, prefix):
# Do common install stuff
self.setup()
# Do more common install stuff
Note that the default version of decorated methods must
*always* come first. Otherwise it will override all of the
platform-specific versions. There's not much we can do to get
around this because of the way decorators work.
"""
def __init__(self, condition):
if isinstance(condition, bool):
self.spec = Spec() if condition else None
else:
self.spec = Spec(condition)
def __call__(self, method):
        # In Python 2, get the first definition of the method in the
# calling scope by looking at the caller's locals. In Python 3,
# we handle this using MultiMethodMeta.__prepare__.
if MultiMethodMeta._locals is None:
MultiMethodMeta._locals = caller_locals()
# Create a multimethod with this name if there is not one already
original_method = MultiMethodMeta._locals.get(method.__name__)
if not type(original_method) == SpecMultiMethod:
original_method = SpecMultiMethod(original_method)
if self.spec is not None:
original_method.register(self.spec, method)
return original_method
class MultiMethodError(spack.error.SpackError):
"""Superclass for multimethod dispatch errors"""
def __init__(self, message):
super(MultiMethodError, self).__init__(message)
class NoSuchMethodError(spack.error.SpackError):
"""Raised when we can't find a version of a multi-method."""
def __init__(self, cls, method_name, spec, possible_specs):
super(NoSuchMethodError, self).__init__(
"Package %s does not support %s called with %s. Options are: %s"
% (cls.__name__, method_name, spec,
", ".join(str(s) for s in possible_specs)))
| 38.875486
| 77
| 0.651887
| 1,302
| 9,991
| 4.87404
| 0.268817
| 0.008667
| 0.007879
| 0.013237
| 0.102584
| 0.075796
| 0.058935
| 0.041286
| 0.011031
| 0.011031
| 0
| 0.005444
| 0.282955
| 9,991
| 256
| 78
| 39.027344
| 0.880374
| 0.57772
| 0
| 0.025
| 0
| 0
| 0.020088
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0.1375
| false
| 0
| 0.075
| 0
| 0.3875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a00049d0a23118a6b45ced9a50bf455984aaa3c
| 8,974
|
py
|
Python
|
paperstream/create_diary.py
|
MarcoRosso/paperstream
|
f8d5485ea337334b036393f9566b74394b5dd234
|
[
"MIT"
] | null | null | null |
paperstream/create_diary.py
|
MarcoRosso/paperstream
|
f8d5485ea337334b036393f9566b74394b5dd234
|
[
"MIT"
] | null | null | null |
paperstream/create_diary.py
|
MarcoRosso/paperstream
|
f8d5485ea337334b036393f9566b74394b5dd234
|
[
"MIT"
] | null | null | null |
"""
Create diaries in A5 and A4 sizes based on PDF templates.
Julio Vega
"""
import datetime
import math
import sys
from io import BytesIO
from pathlib import Path
from PyPDF2 import PdfFileReader, PdfFileWriter
from reportlab.lib.pagesizes import A5, A4
from reportlab.lib.utils import ImageReader
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFError, TTFont
from reportlab.pdfgen import canvas
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = getattr(sys, '_MEIPASS', Path(__file__).resolve().parent)
return base_path / Path(relative_path)
CORNER_DIR = resource_path("input/1_diaries_to_create/resources")
LOGO_PATH = resource_path(CORNER_DIR / Path("logo.png"))
DEFAULT_FONT = resource_path(CORNER_DIR / Path('FreeSansLocal.ttf'))
CREATED_DIARIES_DIR = resource_path("output/created_diaries/")
#############################################################
#############################################################
#############################################################
##### Algorithm to convert A4 pages into an A5 booklet ######
#############################################################
#############################################################
#############################################################
## Adapted from the work by Luke Plant, https://bitbucket.org/spookylukey/booklet-maker/src
class Sheet(object):
'''A4 Sheets'''
def __init__(self):
self.front = PrintPage()
self.back = PrintPage()
class PrintPage(object):
'''A4 page with containers for A4 pages'''
def __init__(self):
self.left = PageContainer()
self.right = PageContainer()
class PageContainer(object):
'''A5 containers'''
def __init__(self):
self.page = None
def build_booklet(pages):
''' Build booklet '''
# Double sized page, with double-sided printing, fits 4 of the original.
sheet_count = int(math.ceil(len(pages) / 4.0))
booklet = [Sheet() for i in range(0, sheet_count)]
# Assign input pages to sheets
# This is the core algo. To understand it:
# * pick up 3 A4 sheets, landscape
# * number the sheets from 1 to 3, starting with bottom one
# * fold the stack in the middle to form an A5 booklet
# * work out what order you need to use the front left,
# front right, back left and back right sides.
def containers():
'''Yields parts of the booklet in the order they should be used.'''
for sheet in booklet:
yield sheet.back.right
yield sheet.front.left
for sheet in reversed(booklet):
yield sheet.front.right
yield sheet.back.left
for container, page in zip(containers(), pages):
container.page = page
return booklet
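# Hedged illustration (an addition, not original code): what build_booklet
# produces for an 8-page input, using bare page numbers as stand-ins for
# real PDF page objects.
def _booklet_order_demo(page_count=8):
    '''Return (sheet_no, front(left, right), back(left, right)) tuples.'''
    sheets = build_booklet(list(range(1, page_count + 1)))
    layout = []
    for number, sheet in enumerate(sheets, start=1):
        layout.append((number,
                       (sheet.front.left.page, sheet.front.right.page),
                       (sheet.back.left.page, sheet.back.right.page)))
    # For 8 pages: sheet 1 -> front (2, 7), back (8, 1); sheet 2 -> front (4, 5), back (6, 3)
    return layout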
def add_double_page(writer, page_size, print_page):
''' Adds a double page '''
width, height = page_size
page = writer.insertBlankPage(width=width, height=height, index=writer.getNumPages())
# Merge the left page
l_page = print_page.left.page
if l_page is not None:
page.mergePage(l_page)
# Merge the right page with translation
r_page = print_page.right.page
if r_page is not None:
page.mergeTranslatedPage(r_page, width / 2, 0)
def convert_to_a5_booklet(input_file, blanks=0):
'''Converts a PDF into a double sided A5 file to print as an A4 (two A5 pages per A4 page)'''
# Create internal dir to save the a5 files
a5_booklets_dir = CREATED_DIARIES_DIR
Path.mkdir(a5_booklets_dir, parents=True, exist_ok=True)
# Create the a5 booklet's name
a5_booklet_name = Path(input_file).stem + "_as_a5_booklet"
a5_booklet = a5_booklets_dir / Path("{}.pdf".format(a5_booklet_name))
reader = PdfFileReader(open(input_file, "rb"))
pages = [reader.getPage(p) for p in range(0, reader.getNumPages())]
for index in range(0, blanks):
pages.insert(0, None)
sheets = build_booklet(pages)
writer = PdfFileWriter()
    first_page = reader.getPage(0)
    input_width = first_page.mediaBox.getWidth()
    output_width = input_width * 2
    input_height = first_page.mediaBox.getHeight()
output_height = input_height
page_size = (output_width, output_height)
# We want to group fronts and backs together.
for sheet in sheets:
add_double_page(writer, page_size, sheet.back)
add_double_page(writer, page_size, sheet.front)
with open(a5_booklet, "wb") as a5_booklet_stream:
writer.write(a5_booklet_stream)
return a5_booklet
#############################################################
#############################################################
#############################################################
########## Create A4 paper diary ############
#############################################################
#############################################################
#############################################################
def create_diary_cover(participant_id, email, font):
'''Create cover of the A5 diary'''
packet = BytesIO()
cover_canvas = canvas.Canvas(packet, pagesize=A4)
width, height = A4
# Centering the logo or participant ID
if Path.exists(LOGO_PATH):
logo = ImageReader(LOGO_PATH)
cover_canvas.drawImage(logo, x=(width * (1/6.0)),
y=(height/4),
width=width * (4/6.0),
preserveAspectRatio=True,
mask='auto')
else:
cover_canvas.setFont(font, 50)
cover_canvas.drawCentredString(width/2, height/2, participant_id)
    # Lost-and-found note, shown only when an email address is provided
if not (email is None or email == ""):
cover_canvas.setFont(font, 15)
cover_canvas.drawCentredString(width/2, 50,
"If you find this document, please email " + email)
cover_canvas.save()
packet.seek(0)
return PdfFileReader(packet).getPage(0)
def create_diary_page(pdf_template, font, top_left_text, page_number, top_right_text):
packet = BytesIO()
diary_canvas = canvas.Canvas(packet, pagesize=A5)
# Header
diary_canvas.setFont(font, 11)
#diary_canvas.drawRightString(378, 562, str(top_right_text))
diary_canvas.drawString(36.5, 562, top_left_text)
# Corners
corners = [(CORNER_DIR / Path("corner_ul.png"), 25, 553),
(CORNER_DIR / Path("corner_ur.png"), 365, 553),
(CORNER_DIR / Path("corner_bl.png"), 25, 15),
(CORNER_DIR / Path("corner_br.png"), 365, 15)]
for corner_path, x, y in corners:
if corner_path.exists():
corner = ImageReader(corner_path)
diary_canvas.drawImage(corner, x=x, y=y, mask='auto')
# Footer
#diary_canvas.setFont(font, 8)
#diary_canvas.drawString(36.5, 24, str(page_number))
diary_canvas.save()
# Merge template and additions (header, corners and footer)
packet.seek(0)
page_additions = PdfFileReader(packet).getPage(0)
new_page = PdfFileReader(open(pdf_template, "rb")).getPage(0)
new_page.mergePage(page_additions)
new_page.scaleTo(A4[0], A4[1])
return new_page
def create_a4_diary(pdf_template, pages, top_left_text, email=None, font='Arial'):
"""Creates an A4 document with [PAGES] from [STARTING_DATE]"""
starting_date = parse_date(top_left_text)
font = set_active_font(font)
# Create output folder/file
if not Path(pdf_template).exists():
raise ValueError("Template does not exist {}".format(pdf_template))
Path.mkdir(CREATED_DIARIES_DIR, parents=True, exist_ok=True)
a4_document_name = Path(pdf_template).stem
a4_document_path = CREATED_DIARIES_DIR / Path("{}_document.pdf".format(a4_document_name))
pdf_file = PdfFileWriter()
# Cover
pdf_file.addPage(create_diary_cover(a4_document_name, email, font))
pdf_file.addBlankPage()
# Pages
for page in range(1, pages+1):
if starting_date is not None:
top_left_text = starting_date.strftime('%A, %d %b %Y')
starting_date += datetime.timedelta(days=1)
        new_page = create_diary_page(pdf_template, font, top_left_text, page, a4_document_name)
pdf_file.addPage(new_page)
# Backcover
pdf_file.addBlankPage()
# Save a4 document
with open(a4_document_path, "wb") as output_stream:
pdf_file.write(output_stream)
return a4_document_path
def set_active_font(font):
"""Register the font to use in header and footer of the diary"""
try:
pdfmetrics.registerFont(TTFont(font, font + '.ttf'))
except TTFError:
font = 'FreeSansLocal'
pdfmetrics.registerFont(TTFont(font, DEFAULT_FONT))
return font
def parse_date(s):
try:
return datetime.datetime.strptime(s, "%d/%m/%Y")
except ValueError:
return None
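# Hedged usage sketch (an addition, not original code): the template path and
# email below are hypothetical placeholders.
if __name__ == "__main__":
    TEMPLATE = resource_path("input/1_diaries_to_create/template.pdf")  # hypothetical file
    if Path(TEMPLATE).exists():
        A4_DIARY = create_a4_diary(TEMPLATE, pages=7,
                                   top_left_text="17/06/2019",
                                   email="owner@example.com")
        convert_to_a5_booklet(A4_DIARY)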
| 33.864151
| 97
| 0.613885
| 1,121
| 8,974
| 4.723461
| 0.257806
| 0.020397
| 0.014731
| 0.014353
| 0.109726
| 0.043626
| 0.029084
| 0.016997
| 0.016997
| 0.016997
| 0
| 0.018318
| 0.20916
| 8,974
| 265
| 98
| 33.864151
| 0.727772
| 0.17651
| 0
| 0.074324
| 0
| 0
| 0.046234
| 0.008879
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087838
| false
| 0.006757
| 0.074324
| 0
| 0.243243
| 0.02027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a009f467895ff4a7817d2ca2bfbdacdd183cb58
| 2,459
|
py
|
Python
|
wextractor/extractors/csv_extractor.py
|
codeforamerica/w-drive-extractor
|
1c62bfff6fc21c4cce4a4409b76355ec4e07daae
|
[
"MIT"
] | 3
|
2015-01-14T06:27:16.000Z
|
2015-02-26T23:39:39.000Z
|
wextractor/extractors/csv_extractor.py
|
codeforamerica/w-drive-extractor
|
1c62bfff6fc21c4cce4a4409b76355ec4e07daae
|
[
"MIT"
] | 8
|
2015-01-15T17:50:30.000Z
|
2015-05-12T17:09:04.000Z
|
wextractor/extractors/csv_extractor.py
|
codeforamerica/w-drive-extractor
|
1c62bfff6fc21c4cce4a4409b76355ec4e07daae
|
[
"MIT"
] | 4
|
2015-01-14T15:20:49.000Z
|
2021-04-16T10:45:22.000Z
|
#!/usr/bin/env python
import urllib2
import httplib
from urlparse import urlparse
import csv
from wextractor.extractors.extractor import Extractor
class CsvExtractor(Extractor):
def __init__(self, target, header=None, dtypes=None, url=None):
'''
CsvExtractor initializes with an optional url flag that tells
the extractor whether or not the resource is local or remote so
that it can be loaded accordingly
'''
super(CsvExtractor, self).__init__(target, header, dtypes)
if url is None:
self.url = self.detect_url(target)
elif type(url) != bool:
raise TypeError('url kwarg must be of type bool')
else:
self.url = url
def detect_url(self, target):
# see: http://stackoverflow.com/questions/2924422/how-do-i-determine-if-a-web-page-exists-with-shell-scripting
# and http://stackoverflow.com/questions/1140661/python-get-http-response-code-from-a-url
# for additional information
good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]
# check to see if we have a scheme in the url, and append one if not
parsed_target = urlparse(target)
if bool(parsed_target.scheme) is False:
target = 'http://' + target
host, path = urlparse(target)[1:3]
try:
conn = httplib.HTTPConnection(host)
conn.request("HEAD", path)
status = conn.getresponse().status
except StandardError:
status = None
return status in good_codes
def extract(self):
if self.url:
raw_data = urllib2.urlopen(self.target).read().decode('utf-8-sig').rstrip()
else:
with open(self.target, 'r') as f:
raw_data = f.read().decode('utf-8-sig').rstrip()
# standardize the file endings
raw_data = raw_data.replace('\r\n', '\n').replace('\r', '\n')
if self.header is None:
# use first line if self.header not defined
current_headers = raw_data.split('\n')[0].split(',')
raw_data = '\n'.join(raw_data.split('\n')[1:])
else:
current_headers = self.header
output = []
reader = csv.reader(raw_data.splitlines(), delimiter=',')
for row in reader:
output.append(
self.transform_row(current_headers, row)
)
return output
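# Hedged usage sketch (an addition, not original code): 'contracts.csv' and its
# header are hypothetical; rows come back shaped by Extractor.transform_row.
if __name__ == '__main__':
    extractor = CsvExtractor(
        'contracts.csv',
        header=['id', 'vendor', 'amount'],
        url=False,  # treat the target as a local file, skipping remote detection
    )
    print(extractor.extract())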
| 34.152778
| 118
| 0.604311
| 308
| 2,459
| 4.737013
| 0.467532
| 0.038382
| 0.027416
| 0.039753
| 0.031528
| 0.031528
| 0
| 0
| 0
| 0
| 0
| 0.012528
| 0.285889
| 2,459
| 71
| 119
| 34.633803
| 0.818337
| 0.220415
| 0
| 0.065217
| 0
| 0
| 0.041689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.108696
| 0
| 0.23913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a01b2b39f8bda22480b43b79a5034c95f31f8f0
| 9,010
|
py
|
Python
|
pyscf/geomopt/berny_solver.py
|
r-peng/pyscf
|
9a14f9bcc63bc75f5939cb4d00eb47861d8d8989
|
[
"Apache-2.0"
] | 2
|
2021-06-30T22:33:35.000Z
|
2021-11-22T18:02:36.000Z
|
pyscf/geomopt/berny_solver.py
|
r-peng/pyscf
|
9a14f9bcc63bc75f5939cb4d00eb47861d8d8989
|
[
"Apache-2.0"
] | null | null | null |
pyscf/geomopt/berny_solver.py
|
r-peng/pyscf
|
9a14f9bcc63bc75f5939cb4d00eb47861d8d8989
|
[
"Apache-2.0"
] | 2
|
2021-09-16T23:37:42.000Z
|
2021-10-14T23:00:39.000Z
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Interface to geometry optimizer pyberny https://github.com/jhrmnn/pyberny
'''
from __future__ import absolute_import
import pkg_resources
try:
dist = pkg_resources.get_distribution('pyberny')
except pkg_resources.DistributionNotFound:
dist = None
if dist is None or [int(x) for x in dist.version.split('.')] < [0, 6, 2]:
msg = ('Geometry optimizer Pyberny not found or outdated. Install or update '
'with:\n\n\tpip install -U pyberny')
raise ImportError(msg)
import time
import numpy
import logging
from pyscf import lib
from pyscf.geomopt.addons import (as_pyscf_method, dump_mol_geometry,
symmetrize)
from pyscf import __config__
from pyscf.grad.rhf import GradientsBasics
from berny import Berny, geomlib, coords
# Overwrite pyberny's atomic unit
coords.angstrom = 1./lib.param.BOHR
INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True)
ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True)
def to_berny_geom(mol, include_ghost=INCLUDE_GHOST):
atom_charges = mol.atom_charges()
if include_ghost:
# Symbol Ghost is not supported in current version of pyberny
#species = [mol.atom_symbol(i) if z != 0 else 'Ghost'
# for i,z in enumerate(atom_charges)]
species = [mol.atom_symbol(i) if z != 0 else 'H'
for i,z in enumerate(atom_charges)]
coords = mol.atom_coords() * lib.param.BOHR
else:
atmlst = numpy.where(atom_charges != 0)[0] # Exclude ghost atoms
species = [mol.atom_symbol(i) for i in atmlst]
coords = mol.atom_coords()[atmlst] * lib.param.BOHR
# geomlib.Geometry is available in the new version of pyberny solver. (issue #212)
if getattr(geomlib, 'Geometry', None):
return geomlib.Geometry(species, coords)
else:
return geomlib.Molecule(species, coords)
def _geom_to_atom(mol, geom, include_ghost):
coords = geom.coords
if include_ghost:
atom_coords = coords / lib.param.BOHR
else:
atmlst = numpy.where(mol.atom_charges() != 0)[0]
atom_coords = mol.atom_coords()
atom_coords[atmlst] = coords / lib.param.BOHR
return atom_coords
def to_berny_log(pyscf_log):
'''Adapter to allow pyberny to use pyscf.logger
'''
class PyscfHandler(logging.Handler):
def emit(self, record):
pyscf_log.info(record.getMessage())
log = logging.getLogger('{}.{}'.format(__name__, id(pyscf_log)))
log.addHandler(PyscfHandler())
log.setLevel('INFO')
return log
def kernel(method, assert_convergence=ASSERT_CONV,
include_ghost=INCLUDE_GHOST, callback=None, **kwargs):
'''Optimize geometry with pyberny for the given method.
To adjust the convergence threshold, parameters can be set in kwargs as
below:
.. code-block:: python
conv_params = { # They are default settings
'gradientmax': 0.45e-3, # Eh/[Bohr|rad]
'gradientrms': 0.15e-3, # Eh/[Bohr|rad]
'stepmax': 1.8e-3, # [Bohr|rad]
'steprms': 1.2e-3, # [Bohr|rad]
}
from pyscf.geomopt import berny_solver
opt = berny_solver.GeometryOptimizer(method)
opt.params = conv_params
opt.kernel()
'''
t0 = time.clock(), time.time()
mol = method.mol.copy()
if 'log' in kwargs:
log = lib.logger.new_logger(method, kwargs['log'])
elif 'verbose' in kwargs:
log = lib.logger.new_logger(method, kwargs['verbose'])
else:
log = lib.logger.new_logger(method)
if isinstance(method, lib.GradScanner):
g_scanner = method
elif isinstance(method, GradientsBasics):
g_scanner = method.as_scanner()
elif getattr(method, 'nuc_grad_method', None):
g_scanner = method.nuc_grad_method().as_scanner()
else:
raise NotImplementedError('Nuclear gradients of %s not available' % method)
if not include_ghost:
g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0]
# When symmetry is enabled, the molecule may be shifted or rotated to make
# the z-axis be the main axis. The transformation can cause inconsistency
# between the optimization steps. The transformation is muted by setting
    # an explicit point group to the keyword mol.symmetry (see symmetry
# detection code in Mole.build function).
if mol.symmetry:
mol.symmetry = mol.topgroup
# temporary interface, taken from berny.py optimize function
berny_log = to_berny_log(log)
geom = to_berny_geom(mol, include_ghost)
optimizer = Berny(geom, logger=berny_log, **kwargs)
t1 = t0
e_last = 0
for cycle, geom in enumerate(optimizer):
if log.verbose >= lib.logger.NOTE:
log.note('\nGeometry optimization cycle %d', cycle+1)
dump_mol_geometry(mol, geom.coords, log)
if mol.symmetry:
geom.coords = symmetrize(mol, geom.coords)
mol.set_geom_(_geom_to_atom(mol, geom, include_ghost), unit='Bohr')
energy, gradients = g_scanner(mol)
log.note('cycle %d: E = %.12g dE = %g norm(grad) = %g', cycle+1,
energy, energy - e_last, numpy.linalg.norm(gradients))
e_last = energy
if callable(callback):
callback(locals())
if assert_convergence and not g_scanner.converged:
raise RuntimeError('Nuclear gradients of %s not converged' % method)
optimizer.send((energy, gradients))
        t1 = log.timer('geometry optimization cycle %d' % cycle, *t1)
    t0 = log.timer('geometry optimization', *t0)
return optimizer._converged, mol
def optimize(method, assert_convergence=ASSERT_CONV,
include_ghost=INCLUDE_GHOST, callback=None, **kwargs):
'''Optimize geometry with pyberny for the given method.
To adjust the convergence threshold, parameters can be set in kwargs as
below:
.. code-block:: python
conv_params = { # They are default settings
'gradientmax': 0.45e-3, # Eh/[Bohr|rad]
'gradientrms': 0.15e-3, # Eh/[Bohr|rad]
'stepmax': 1.8e-3, # [Bohr|rad]
'steprms': 1.2e-3, # [Bohr|rad]
}
from pyscf.geomopt import berny_solver
newmol = berny_solver.optimize(method, **conv_params)
'''
return kernel(method, assert_convergence, include_ghost, callback,
**kwargs)[1]
class GeometryOptimizer(lib.StreamObject):
'''Optimize the molecular geometry for the input method.
Note the method.mol will be changed after calling .kernel() method.
'''
def __init__(self, method):
self.method = method
self.callback = None
self.params = {}
self.converged = False
self.max_cycle = 100
@property
def mol(self):
return self.method.mol
@mol.setter
def mol(self, x):
self.method.mol = x
def kernel(self, params=None):
if params is not None:
self.params.update(params)
params = dict(self.params)
params['maxsteps'] = self.max_cycle
self.converged, self.mol = \
kernel(self.method, callback=self.callback, **params)
return self.mol
optimize = kernel
del(INCLUDE_GHOST, ASSERT_CONV)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf, dft, cc, mp
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
mf = scf.RHF(mol)
conv_params = {
'gradientmax': 6e-3, # Eh/Bohr
'gradientrms': 2e-3, # Eh/Bohr
'stepmax': 2e-2, # Bohr
'steprms': 1.5e-2, # Bohr
}
mol1 = optimize(mf, **conv_params)
print(mf.kernel() - -153.219208484874)
print(scf.RHF(mol1).kernel() - -153.222680852335)
mf = dft.RKS(mol)
mf.xc = 'pbe,'
mf.conv_tol = 1e-7
mol1 = optimize(mf)
mymp2 = mp.MP2(scf.RHF(mol))
mol1 = optimize(mymp2)
mycc = cc.CCSD(scf.RHF(mol))
mol1 = optimize(mycc)
| 34.787645
| 91
| 0.642619
| 1,204
| 9,010
| 4.687708
| 0.274917
| 0.034018
| 0.007442
| 0.007087
| 0.245571
| 0.220943
| 0.193125
| 0.167789
| 0.148476
| 0.123671
| 0
| 0.038166
| 0.249723
| 9,010
| 258
| 92
| 34.922481
| 0.796746
| 0.287569
| 0
| 0.069182
| 0
| 0
| 0.131452
| 0.014588
| 0
| 0
| 0
| 0
| 0.037736
| 1
| 0.062893
| false
| 0
| 0.081761
| 0.006289
| 0.213836
| 0.012579
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a01ccf4f5933cd1046863655e9835118928c6fc
| 1,838
|
py
|
Python
|
src/main/python/taf/foundation/api/ui/aut.py
|
WesleyPeng/uiXautomation
|
2d2c4d5a774ffda934d5615036a80c449bac930d
|
[
"Apache-2.0"
] | 6
|
2017-09-19T15:05:47.000Z
|
2021-07-16T16:07:46.000Z
|
src/main/python/taf/foundation/api/ui/aut.py
|
WesleyPeng/uiXautomation
|
2d2c4d5a774ffda934d5615036a80c449bac930d
|
[
"Apache-2.0"
] | 1
|
2018-06-02T18:45:51.000Z
|
2018-06-02T18:45:51.000Z
|
src/main/python/taf/foundation/api/ui/aut.py
|
WesleyPeng/uiXautomation
|
2d2c4d5a774ffda934d5615036a80c449bac930d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017-2018 {Flair Inc.} WESLEY PENG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from taf.foundation.utils import ConnectionCache
class AUT(object):
cache = None
current = None
def __init__(
self,
name=None,
identifier=None,
**kwargs
):
if not AUT.cache:
AUT.cache = ConnectionCache(identifier)
self.id = self.cache.register(
self._create_instance(name, **kwargs),
identifier
)
AUT.current = self
@staticmethod
def launch(app_location, **kwargs):
raise NotImplementedError(
'Launch application'
)
def activate(self):
if self.id != self.cache.current_key:
self.cache.current_key = self.id
AUT.current = self
def take_screenshot(self):
self.activate()
return self.get_screenshot_data()
def close(self):
self.cache.close(self.id)
if not self.cache.current:
AUT.cache = None
AUT.current = None
def get_screenshot_data(self):
raise NotImplementedError(
'Get screenshot data from AUT'
)
def _create_instance(self, name, **kwargs):
raise NotImplementedError(
'Create instance of AUT'
)
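# Hedged sketch (an addition, not original code): the minimal surface a concrete
# AUT implementation would fill in. The return values below are hypothetical
# stand-ins for a real driver handle and screenshot bytes.
class _DummyAUT(AUT):
    @staticmethod
    def launch(app_location, **kwargs):
        return _DummyAUT(name=app_location, **kwargs)
    def get_screenshot_data(self):
        return b''  # a real implementation returns raw screenshot bytes
    def _create_instance(self, name, **kwargs):
        return {'name': name, 'kwargs': kwargs}  # stand-in for an app/driver handle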
| 25.887324
| 74
| 0.62568
| 220
| 1,838
| 5.154545
| 0.477273
| 0.05291
| 0.042328
| 0.028219
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009238
| 0.293254
| 1,838
| 70
| 75
| 26.257143
| 0.863741
| 0.307943
| 0
| 0.119048
| 0
| 0
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.02381
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a02d8606a3a24d720ef5682953d80e75a8dcabc
| 1,758
|
py
|
Python
|
algo/vigenere.py
|
dkushche/Crypto
|
75919d6df2084aee1de76c9999ac4e361c4efd48
|
[
"MIT"
] | 3
|
2020-05-07T22:03:48.000Z
|
2021-03-11T16:36:56.000Z
|
algo/vigenere.py
|
dkushche/Crypto
|
75919d6df2084aee1de76c9999ac4e361c4efd48
|
[
"MIT"
] | null | null | null |
algo/vigenere.py
|
dkushche/Crypto
|
75919d6df2084aee1de76c9999ac4e361c4efd48
|
[
"MIT"
] | null | null | null |
import crypto_tools
from itertools import cycle
def vigenere_little_doc():
return "encrypt/decrypt using vigenere cypher"
def vigenere_full_doc():
return """
    Advanced Caesar: the substitution alphabet shifts for each character, cycling through the key
"""
def vigenere_str_to_list(string, vigenere_dict):
result = list()
for char in string:
try:
result.append(vigenere_dict.index(char))
except ValueError:
            err_msg = f"There is no {char} in alphabet"
raise ValueError(err_msg)
return result
def vigenere_processing(data, key, lang, encrypt):
vigenere_dict = crypto_tools.get_param_json_data("alphabets.json", lang)
num_data = vigenere_str_to_list(data, vigenere_dict)
num_key = vigenere_str_to_list(key, vigenere_dict)
dict_size = len(vigenere_dict)
num_key = cycle(num_key)
if (encrypt == "encrypt"):
num_result = [(a + b) % dict_size for a, b in zip(num_data, num_key)]
else:
num_result = [
(a + dict_size - b) % dict_size for a, b in zip(num_data, num_key)
]
result_str = ""
for val in num_result:
result_str += vigenere_dict[val]
return result_str
@crypto_tools.file_manipulation()
def vigenere(data):
lang = crypto_tools.cterm('input', 'Data language: ', 'ans')
key = crypto_tools.cterm('input', 'Enter key(str): ', 'ans')
encrypt = crypto_tools.cterm('input',
'You want encrypt or decrypt: ', 'ans')
if encrypt != "encrypt" and encrypt != "decrypt":
raise ValueError("Incorrect action")
data = crypto_tools.utf_decoder(data)
return vigenere_processing(data, key, lang, encrypt)
vigenere.little_doc = vigenere_little_doc
vigenere.full_doc = vigenere_full_doc
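# Hedged usage sketch (an addition, not original code): a non-interactive round
# trip through vigenere_processing. Assumes alphabets.json provides an "en"
# alphabet that contains every character used below.
if __name__ == "__main__":
    secret = vigenere_processing("attackatdawn", "lemon", "en", "encrypt")
    assert vigenere_processing(secret, "lemon", "en", "decrypt") == "attackatdawn"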
| 30.310345
| 78
| 0.660978
| 236
| 1,758
| 4.669492
| 0.326271
| 0.069873
| 0.046279
| 0.046279
| 0.137931
| 0.137931
| 0.137931
| 0.058076
| 0.058076
| 0.058076
| 0
| 0
| 0.236064
| 1,758
| 57
| 79
| 30.842105
| 0.820551
| 0
| 0
| 0
| 0
| 0
| 0.147327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.044444
| 0.044444
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a036923cf292987a326de518f02ae1d70e60da4
| 974
|
py
|
Python
|
kiwi_scp/commands/cmd_cmd.py
|
yavook/kiwi-scp
|
ca4263d913cfbdedc8b14334e3cad61c3b95f0a7
|
[
"MIT"
] | null | null | null |
kiwi_scp/commands/cmd_cmd.py
|
yavook/kiwi-scp
|
ca4263d913cfbdedc8b14334e3cad61c3b95f0a7
|
[
"MIT"
] | null | null | null |
kiwi_scp/commands/cmd_cmd.py
|
yavook/kiwi-scp
|
ca4263d913cfbdedc8b14334e3cad61c3b95f0a7
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import click
from .cmd import KiwiCommandType, KiwiCommand
from .decorators import kiwi_command
from ..executable import COMPOSE_EXE
from ..instance import Instance
from ..project import Project
@click.argument(
"compose_args",
metavar="[ARG]...",
nargs=-1,
)
@click.argument(
"compose_cmd",
metavar="COMMAND",
)
@kiwi_command(
short_help="Run docker-compose command",
# ignore arguments looking like options
# just pass everything down to docker-compose
context_settings={"ignore_unknown_options": True},
)
class CmdCommand(KiwiCommand):
"""Run raw docker-compose command in a project"""
type = KiwiCommandType.PROJECT
enabled_only = True
@classmethod
def run_for_project(cls, instance: Instance, project: Project, compose_cmd: str = None,
compose_args: Tuple[str] = None) -> None:
COMPOSE_EXE.run([compose_cmd, *compose_args], **project.process_kwargs)
| 26.324324
| 91
| 0.708419
| 116
| 974
| 5.793103
| 0.482759
| 0.049107
| 0.059524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001267
| 0.189938
| 974
| 36
| 92
| 27.055556
| 0.850444
| 0.129363
| 0
| 0.074074
| 0
| 0
| 0.102259
| 0.026159
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.259259
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a036ee66041ffdf97db3dd3911676a6d37fc888
| 4,339
|
py
|
Python
|
homework/Testing with Examples (Network)/impl_fail-add_relation-does_not_fail_when_person1_is_non_existent.py
|
rvprasad/software-testing-course
|
3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0
|
[
"CC-BY-4.0"
] | 11
|
2018-02-08T05:23:28.000Z
|
2021-05-24T13:23:56.000Z
|
homework/Testing with Examples (Network)/impl_fail-add_relation-does_not_fail_when_person1_is_non_existent.py
|
rvprasad/software-testing-course
|
3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0
|
[
"CC-BY-4.0"
] | null | null | null |
homework/Testing with Examples (Network)/impl_fail-add_relation-does_not_fail_when_person1_is_non_existent.py
|
rvprasad/software-testing-course
|
3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0
|
[
"CC-BY-4.0"
] | 2
|
2020-09-15T08:51:22.000Z
|
2021-01-26T12:07:18.000Z
|
class MyError(Exception):
pass
class PropertyContainer(object):
def __init__(self):
self.props = {}
def set_property(self, prop, value):
self.props[prop] = value
def get_property(self, prop):
return self.props.get(prop)
def has_property(self, prop):
return prop in self.props
class Node(PropertyContainer):
pass
class Edge(PropertyContainer):
def __init__(self, node1, node2):
super().__init__()
self.node1 = node1
self.node2 = node2
class Network(object):
NAME_PROP = "name" # NAME_PROP is an optional string property
FRIEND_PROP = "friend" # FRIEND_PROP is an optional boolean property
def __init__(self):
self.nodes = set()
self.edges = set()
def create_person(self):
node = Node()
self.nodes.add(node)
return node
    # set prop to value on a person; overwrite if prop exists
def add_person_property(self, person, prop, value):
# flag non-existent person
if person not in self.nodes:
raise RuntimeError("person does not exist")
if prop == Network.NAME_PROP:
# disallow non-string values for NAME_PROP property
if not isinstance(value, str):
raise TypeError(
"{0} is a string property".format(Network.NAME_PROP))
# disallow multiple people to have the same name
for p in self.nodes:
if p.get_property(Network.NAME_PROP) == value and \
p is not person:
raise ValueError("{0} name already taken".format(value))
person.set_property(prop, value)
def add_relation(self, person1, person2):
# flag non-existent persons
if person1 not in self.nodes:
# raise RuntimeError("person1 does not exist")
person1 = self.create_person()
if person2 not in self.nodes:
raise RuntimeError("person2 does not exist")
# flag existing edge
for e in self.edges:
if (e.node1 is person1 and e.node2 is person2) or \
(e.node1 is person2 and e.node2 is person1):
raise ValueError("relation exists")
self.edges.add(Edge(person1, person2))
def add_relation_property(self, person1, person2, prop, value):
# disallow non-boolean values for FRIEND_PROP property
if prop == Network.FRIEND_PROP and not isinstance(value, bool):
raise TypeError(
"{0} is a boolean property".format(Network.FRIEND_PROP))
for e in self.edges:
if (e.node1 is person1 and e.node2 is person2) or \
(e.node1 is person2 and e.node2 is person1):
e.set_property(prop, value)
return
# flag non-existent relation
raise RuntimeError("Non-existent relation")
# get a person with given name
def get_person(self, name):
# disallow non-string values for name
if not isinstance(name, str):
raise TypeError(
"{0} is a string argument".format(Network.NAME_PROP))
for n in self.nodes:
if n.get_property(Network.NAME_PROP) == name:
return n
# flag non-existent person
raise RuntimeError("No person named {0}".format(name))
# get friends of friends of a person with given name
def friends_of_friends(self, name):
# disallow non-string values for name
if not isinstance(name, str):
raise TypeError(
"{0} is a string argument".format(Network.NAME_PROP))
# flag non-existent person
person = self.get_person(name)
visited = set([person])
i = 0
while i < 2:
newly_visited = set()
for p in (x for x in visited):
for e in (x for x in self.edges if
x.get_property(Network.FRIEND_PROP) == True):
n1 = e.node1
n2 = e.node2
if n1 == p:
newly_visited.add(e.node2)
elif n2 == p:
newly_visited.add(e.node1)
visited = newly_visited
i += 1
return list(visited)
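# Hedged illustration (an addition, not original code): this implementation is a
# deliberately faulty variant -- add_relation() silently creates person1 instead
# of raising when person1 is unknown. The snippet below surfaces that behaviour.
if __name__ == "__main__":
    net = Network()
    known = net.create_person()
    stranger = Node()                  # never registered via create_person()
    net.add_relation(stranger, known)  # the spec expects RuntimeError; this succeeds
    assert len(net.nodes) == 2         # a replacement person was silently created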
| 34.991935
| 76
| 0.569717
| 533
| 4,339
| 4.536585
| 0.178236
| 0.022333
| 0.037221
| 0.028122
| 0.295285
| 0.24483
| 0.174938
| 0.163772
| 0.163772
| 0.163772
| 0
| 0.017662
| 0.347546
| 4,339
| 123
| 77
| 35.276423
| 0.836454
| 0.144042
| 0
| 0.202247
| 0
| 0
| 0.061418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134831
| false
| 0.022472
| 0
| 0.022472
| 0.280899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a045d9a56c4a8715b77c0b2cd2d5ff977fa98ed
| 609
|
py
|
Python
|
conf/feature_config.py
|
pupuwudi/nlp_xiaojiang
|
182ac4522b6012a52de6e1d0db7e6a47cb716e5b
|
[
"MIT"
] | null | null | null |
conf/feature_config.py
|
pupuwudi/nlp_xiaojiang
|
182ac4522b6012a52de6e1d0db7e6a47cb716e5b
|
[
"MIT"
] | null | null | null |
conf/feature_config.py
|
pupuwudi/nlp_xiaojiang
|
182ac4522b6012a52de6e1d0db7e6a47cb716e5b
|
[
"MIT"
] | 2
|
2021-01-18T10:07:20.000Z
|
2022-01-12T10:09:47.000Z
|
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/5/10 9:13
# @author :Mo
# @function :path of FeatureProject
import pathlib
import sys
import os
# base dir
projectdir = str(pathlib.Path(os.path.abspath(__file__)).parent.parent)
sys.path.append(projectdir)
# path of BERT model
model_dir = projectdir + '/Data/chinese_L-12_H-768_A-12'
config_name = model_dir + '/bert_config.json'
ckpt_name = model_dir + '/bert_model.ckpt'
vocab_file = model_dir + '/vocab.txt'
# GPU memory usage fraction
gpu_memory_fraction = 0.32
# By default, take the output of the second-to-last layer as the sentence vector
layer_indexes = [-2]
# Maximum sequence length
max_seq_len = 32
| 22.555556
| 72
| 0.689655
| 90
| 609
| 4.433333
| 0.655556
| 0.080201
| 0.06015
| 0.080201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047904
| 0.17734
| 609
| 26
| 73
| 23.423077
| 0.748503
| 0.284072
| 0
| 0
| 0
| 0
| 0.180451
| 0.072682
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a047a8d5dd4c7ba8745cc48738110ca5fef1d2f
| 813
|
py
|
Python
|
tests/test_prep_read.py
|
taruma/hidrokit
|
a96c4ba2235d58d2bbc905be44d1b413ed19b3d2
|
[
"MIT"
] | 5
|
2019-07-15T13:35:52.000Z
|
2020-04-01T17:34:16.000Z
|
tests/test_prep_read.py
|
taruma/hidrokit
|
a96c4ba2235d58d2bbc905be44d1b413ed19b3d2
|
[
"MIT"
] | 107
|
2019-01-03T02:12:26.000Z
|
2020-02-18T00:48:27.000Z
|
tests/test_prep_read.py
|
hidrokit/hidrokit
|
c8b949aa6a81981684a24e5dd1e498ec82cbe0ca
|
[
"MIT"
] | 2
|
2020-06-17T00:08:32.000Z
|
2020-08-24T18:55:38.000Z
|
"""Test for .prep.read module
"""
from hidrokit.prep import read
import numpy as np
import pandas as pd
A = pd.DataFrame(
data=[
[1, 3, 4, np.nan, 2, np.nan],
[np.nan, 2, 3, np.nan, 1, 4],
[2, np.nan, 1, 3, 4, np.nan]
],
columns=['A', 'B', 'C', 'D', 'E', 'F']
)
A_date = A.set_index(pd.date_range("20190617", "20190619"))
res_A_number = {'A': [1], 'B': [2], 'C': [], 'D': [0, 1], 'E': [], 'F': [0, 2]}
res_A_date = {'A': ['0618'], 'B': ['0619'], 'C': [],
'D': ['0617', '0618'], 'E': [], 'F': ['0617', '0619']}
def test_read_number():
test = read.missing_row(A, date_index=False)
assert test.items() == res_A_number.items()
def test_read_date():
test = read.missing_row(A_date, date_format="%m%d")
assert test.items() == res_A_date.items()
| 25.40625
| 79
| 0.526445
| 133
| 813
| 3.067669
| 0.353383
| 0.073529
| 0.014706
| 0.02451
| 0.245098
| 0.112745
| 0
| 0
| 0
| 0
| 0
| 0.091195
| 0.217712
| 813
| 31
| 80
| 26.225806
| 0.550314
| 0.03198
| 0
| 0
| 0
| 0
| 0.079487
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.095238
| false
| 0
| 0.142857
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a04b26d17a373b84c1afb19abef67f291bb970a
| 9,747
|
py
|
Python
|
src/train_DFCAN.py
|
ikecoglu/DL-SR
|
5e4c794f1434cd4a9b2b1aecf3738065b11bede1
|
[
"MIT"
] | 46
|
2021-01-07T03:38:07.000Z
|
2022-03-24T19:11:23.000Z
|
src/train_DFCAN.py
|
ikecoglu/DL-SR
|
5e4c794f1434cd4a9b2b1aecf3738065b11bede1
|
[
"MIT"
] | 7
|
2021-02-06T14:23:18.000Z
|
2022-02-13T04:08:45.000Z
|
src/train_DFCAN.py
|
ikecoglu/DL-SR
|
5e4c794f1434cd4a9b2b1aecf3738065b11bede1
|
[
"MIT"
] | 16
|
2021-01-26T16:22:49.000Z
|
2022-02-26T03:21:08.000Z
|
import argparse
from keras import optimizers
import matplotlib.pyplot as plt
import numpy as np
import datetime
from keras.callbacks import TensorBoard
import glob
import os
import tensorflow as tf
from models import *
from utils.lr_controller import ReduceLROnPlateau
from utils.data_loader import data_loader, data_loader_multi_channel
from utils.utils import img_comp
from utils.loss import loss_mse_ssim
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_id", type=int, default=1)
parser.add_argument("--gpu_memory_fraction", type=float, default=0.3)
parser.add_argument("--mixed_precision_training", type=int, default=1)
parser.add_argument("--data_dir", type=str, default="../dataset/train/F-actin")
parser.add_argument("--save_weights_dir", type=str, default="../trained_models")
parser.add_argument("--model_name", type=str, default="DFCAN")
parser.add_argument("--patch_height", type=int, default=128)
parser.add_argument("--patch_width", type=int, default=128)
parser.add_argument("--input_channels", type=int, default=9)
parser.add_argument("--scale_factor", type=int, default=2)
parser.add_argument("--norm_flag", type=int, default=1)
parser.add_argument("--iterations", type=int, default=1000000)
parser.add_argument("--sample_interval", type=int, default=1000)
parser.add_argument("--validate_interval", type=int, default=2000)
parser.add_argument("--validate_num", type=int, default=500)
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--start_lr", type=float, default=1e-4)
parser.add_argument("--lr_decay_factor", type=float, default=0.5)
parser.add_argument("--load_weights", type=int, default=0)
parser.add_argument("--optimizer_name", type=str, default="adam")
args = parser.parse_args()
gpu_id = str(args.gpu_id)
gpu_memory_fraction = args.gpu_memory_fraction
mixed_precision_training = str(args.mixed_precision_training)
data_dir = args.data_dir
save_weights_dir = args.save_weights_dir
validate_interval = args.validate_interval
batch_size = args.batch_size
start_lr = args.start_lr
lr_decay_factor = args.lr_decay_factor
patch_height = args.patch_height
patch_width = args.patch_width
input_channels = args.input_channels
scale_factor = args.scale_factor
norm_flag = args.norm_flag
validate_num = args.validate_num
iterations = args.iterations
load_weights = args.load_weights
optimizer_name = args.optimizer_name
model_name = args.model_name
sample_interval = args.sample_interval
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = mixed_precision_training
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
data_name = data_dir.split('/')[-1]
if input_channels == 1:
save_weights_name = model_name + '-SISR_' + data_name
cur_data_loader = data_loader
train_images_path = data_dir + '/training_wf/'
validate_images_path = data_dir + '/validate_wf/'
else:
save_weights_name = model_name + '-SIM_' + data_name
cur_data_loader = data_loader_multi_channel
train_images_path = data_dir + '/training/'
validate_images_path = data_dir + '/validate/'
save_weights_path = save_weights_dir + '/' + save_weights_name + '/'
train_gt_path = data_dir + '/training_gt/'
validate_gt_path = data_dir + '/validate_gt/'
sample_path = save_weights_path + 'sampled_img/'
if not os.path.exists(save_weights_path):
os.mkdir(save_weights_path)
if not os.path.exists(sample_path):
os.mkdir(sample_path)
# --------------------------------------------------------------------------------
# select models and optimizer
# --------------------------------------------------------------------------------
modelFns = {'DFCAN': DFCAN16.DFCAN}
modelFN = modelFns[model_name]
optimizer_g = optimizers.adam(lr=start_lr, beta_1=0.9, beta_2=0.999)
# --------------------------------------------------------------------------------
# define combined model
# --------------------------------------------------------------------------------
g = modelFN((patch_height, patch_width, input_channels))
g.compile(loss=loss_mse_ssim, optimizer=optimizer_g)
lr_controller = ReduceLROnPlateau(model=g, factor=lr_decay_factor, patience=10, mode='min', min_delta=1e-4,
cooldown=0, min_lr=start_lr * 0.1, verbose=1)
# --------------------------------------------------------------------------------
# about Tensorboard
# --------------------------------------------------------------------------------
log_path = save_weights_path + 'graph'
if not os.path.exists(log_path):
os.mkdir(log_path)
callback = TensorBoard(log_path)
callback.set_model(g)
train_names = 'training_loss'
val_names = ['val_MSE', 'val_SSIM', 'val_PSNR', 'val_NRMSE']
def write_log(callback, names, logs, batch_no):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = logs
summary_value.tag = names
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
# --------------------------------------------------------------------------------
# Sample and validate
# --------------------------------------------------------------------------------
def Validate(iter, sample=0):
validate_path = glob.glob(validate_images_path + '*')
validate_path.sort()
if sample == 1:
r, c = 3, 3
mses, nrmses, psnrs, ssims = [], [], [], []
img_show, gt_show, output_show = [], [], []
validate_path = np.random.choice(validate_path, size=r)
for path in validate_path:
[img, gt] = cur_data_loader([path], validate_images_path, validate_gt_path, patch_height,
patch_width, 1, norm_flag=norm_flag, scale=scale_factor)
output = np.squeeze(g.predict(img))
mses, nrmses, psnrs, ssims = img_comp(gt, output, mses, nrmses, psnrs, ssims)
img_show.append(np.squeeze(np.mean(img, 3)))
gt_show.append(np.squeeze(gt))
output_show.append(output)
# show some examples
fig, axs = plt.subplots(r, c)
cnt = 0
for row in range(r):
axs[row, 1].set_title('MSE=%.4f, SSIM=%.4f, PSNR=%.4f' % (mses[row], ssims[row], psnrs[row]))
for col, image in enumerate([img_show, output_show, gt_show]):
axs[row, col].imshow(np.squeeze(image[row]))
axs[row, col].axis('off')
cnt += 1
fig.savefig(sample_path + '%d.png' % iter)
plt.close()
else:
if validate_num < validate_path.__len__():
validate_path = validate_path[0:validate_num]
mses, nrmses, psnrs, ssims = [], [], [], []
for path in validate_path:
[img, gt] = cur_data_loader([path], validate_images_path, validate_gt_path, patch_height,
patch_width, 1, norm_flag=norm_flag, scale=scale_factor)
output = np.squeeze(g.predict(img))
mses, nrmses, psnrs, ssims = img_comp(gt, output, mses, nrmses, psnrs, ssims)
# if best, save weights.best
g.save_weights(save_weights_path + 'weights.latest')
if min(validate_nrmse) > np.mean(nrmses):
g.save_weights(save_weights_path + 'weights.best')
validate_nrmse.append(np.mean(nrmses))
curlr = lr_controller.on_epoch_end(iter, np.mean(nrmses))
write_log(callback, val_names[0], np.mean(mses), iter)
write_log(callback, val_names[1], np.mean(ssims), iter)
write_log(callback, val_names[2], np.mean(psnrs), iter)
write_log(callback, val_names[3], np.mean(nrmses), iter)
write_log(callback, 'lr', curlr, iter)
# --------------------------------------------------------------------------------
# if exist, load weights
# --------------------------------------------------------------------------------
if load_weights:
if os.path.exists(save_weights_path + 'weights.best'):
        g.load_weights(save_weights_path + 'weights.best')
print('Loading weights successfully: ' + save_weights_path + 'weights.best')
elif os.path.exists(save_weights_path + 'weights.latest'):
        g.load_weights(save_weights_path + 'weights.latest')
print('Loading weights successfully: ' + save_weights_path + 'weights.latest')
# --------------------------------------------------------------------------------
# training
# --------------------------------------------------------------------------------
start_time = datetime.datetime.now()
loss_record = []
validate_nrmse = [np.Inf]
lr_controller.on_train_begin()
images_path = glob.glob(train_images_path + '/*')
for it in range(iterations):
# ------------------------------------
# train generator
# ------------------------------------
input_g, gt_g = cur_data_loader(images_path, train_images_path, train_gt_path, patch_height, patch_width,
batch_size, norm_flag=norm_flag, scale=scale_factor)
loss_generator = g.train_on_batch(input_g, gt_g)
loss_record.append(loss_generator)
elapsed_time = datetime.datetime.now() - start_time
print("%d epoch: time: %s, g_loss = %s" % (it + 1, elapsed_time, loss_generator))
if (it + 1) % sample_interval == 0:
images_path = glob.glob(train_images_path + '/*')
Validate(it + 1, sample=1)
if (it + 1) % validate_interval == 0:
Validate(it + 1, sample=0)
write_log(callback, train_names, np.mean(loss_record), it + 1)
loss_record = []
| 45.125
| 109
| 0.612804
| 1,204
| 9,747
| 4.671927
| 0.181063
| 0.048889
| 0.060444
| 0.031289
| 0.276978
| 0.2544
| 0.1936
| 0.125156
| 0.090667
| 0.074667
| 0
| 0.010528
| 0.171643
| 9,747
| 215
| 110
| 45.334884
| 0.686153
| 0.144968
| 0
| 0.128655
| 0
| 0
| 0.095187
| 0.012154
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011696
| false
| 0
| 0.081871
| 0
| 0.093567
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a04bef0858eef7458b1e38ddd409346a98cb2cc
| 2,635
|
py
|
Python
|
catalyst/exchange/live_graph_clock.py
|
erlendve/catalyst
|
463575bc23c0abd1287f8ec81c4377baabf2b8b8
|
[
"Apache-2.0"
] | null | null | null |
catalyst/exchange/live_graph_clock.py
|
erlendve/catalyst
|
463575bc23c0abd1287f8ec81c4377baabf2b8b8
|
[
"Apache-2.0"
] | null | null | null |
catalyst/exchange/live_graph_clock.py
|
erlendve/catalyst
|
463575bc23c0abd1287f8ec81c4377baabf2b8b8
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from catalyst.constants import LOG_LEVEL
from catalyst.exchange.utils.stats_utils import prepare_stats
from catalyst.gens.sim_engine import (
BAR,
SESSION_START
)
from logbook import Logger
log = Logger('LiveGraphClock', level=LOG_LEVEL)
class LiveGraphClock(object):
"""Realtime clock for live trading.
This class is a drop-in replacement for
:class:`zipline.gens.sim_engine.MinuteSimulationClock`.
This mixes the clock with a live graph.
Notes
-----
This seemingly awkward approach allows us to run the program using a single
thread. This is important because Matplotlib does not play nice with
multi-threaded environments. Zipline probably does not either.
Matplotlib has a pause() method which is a wrapper around time.sleep()
used in the SimpleClock. The key difference is that users
can still interact with the chart during the pause cycles. This is
what enables us to keep a single thread. This is also why we are not using
the 'animate' callback of Matplotlib. We need to direct access to the
__iter__ method in order to yield events to Zipline.
The :param:`time_skew` parameter represents the time difference between
the exchange and the live trading machine's clock. It's not used currently.
"""
def __init__(self, sessions, context, callback=None,
time_skew=pd.Timedelta('0s')):
self.sessions = sessions
self.time_skew = time_skew
self._last_emit = None
self._before_trading_start_bar_yielded = True
self.context = context
self.callback = callback
def __iter__(self):
from matplotlib import pyplot as plt
yield pd.Timestamp.utcnow(), SESSION_START
while True:
current_time = pd.Timestamp.utcnow()
current_minute = current_time.floor('1T')
if self._last_emit is None or current_minute > self._last_emit:
log.debug('emitting minutely bar: {}'.format(current_minute))
self._last_emit = current_minute
yield current_minute, BAR
recorded_cols = list(self.context.recorded_vars.keys())
df, _ = prepare_stats(
self.context.frame_stats, recorded_cols=recorded_cols
)
self.callback(self.context, df)
else:
# I can't use the "animate" reactive approach here because
# I need to yield from the main loop.
# Workaround: https://stackoverflow.com/a/33050617/814633
plt.pause(1)
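# Hedged usage sketch (an addition, not original code): the context and callback
# objects are hypothetical; the clock is consumed as an iterator yielding
# (timestamp, event) pairs -- SESSION_START once, then one BAR per wall-clock minute.
def _consume_clock_sketch(context, on_minute_stats):
    clock = LiveGraphClock(sessions=None, context=context, callback=on_minute_stats)
    for dt, event in clock:
        if event == SESSION_START:
            continue
        # event == BAR: the trading algorithm would handle the minutely bar here
        break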
| 35.133333
| 79
| 0.666414
| 343
| 2,635
| 4.970845
| 0.472303
| 0.038123
| 0.028152
| 0.019941
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008831
| 0.26945
| 2,635
| 74
| 80
| 35.608108
| 0.876883
| 0.412144
| 0
| 0
| 0
| 0
| 0.029212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.171429
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a072b60d911bf4164d6e02341054f5f6f3f27f0
| 3,479
|
py
|
Python
|
nautobot_device_onboarding/tests/test_netdev_keeper.py
|
pszulczewski/nautobot-plugin-device-onboarding
|
9ddec52d7bcc751c4616bd7c1180ed2a1d31ff2c
|
[
"Apache-2.0"
] | 13
|
2021-03-05T10:47:50.000Z
|
2022-03-18T19:07:09.000Z
|
nautobot_device_onboarding/tests/test_netdev_keeper.py
|
pszulczewski/nautobot-plugin-device-onboarding
|
9ddec52d7bcc751c4616bd7c1180ed2a1d31ff2c
|
[
"Apache-2.0"
] | 18
|
2021-03-05T10:29:13.000Z
|
2022-03-08T13:10:38.000Z
|
nautobot_device_onboarding/tests/test_netdev_keeper.py
|
pszulczewski/nautobot-plugin-device-onboarding
|
9ddec52d7bcc751c4616bd7c1180ed2a1d31ff2c
|
[
"Apache-2.0"
] | 14
|
2021-03-06T19:33:46.000Z
|
2022-03-28T16:31:38.000Z
|
"""Unit tests for nautobot_device_onboarding.netdev_keeper module and its classes.
(c) 2020-2021 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from socket import gaierror
from unittest import mock
from django.test import TestCase
from nautobot.dcim.models import Site, DeviceRole, Platform
from nautobot_device_onboarding.exceptions import OnboardException
from nautobot_device_onboarding.helpers import onboarding_task_fqdn_to_ip
from nautobot_device_onboarding.models import OnboardingTask
class NetdevKeeperTestCase(TestCase):
"""Test the NetdevKeeper Class."""
def setUp(self):
"""Create a superuser and token for API calls."""
self.site1 = Site.objects.create(name="USWEST", slug="uswest")
self.device_role1 = DeviceRole.objects.create(name="Firewall", slug="firewall")
self.platform1 = Platform.objects.create(name="JunOS", slug="junos", napalm_driver="junos")
# self.platform2 = Platform.objects.create(name="Cisco NX-OS", slug="cisco-nx-os")
self.onboarding_task4 = OnboardingTask.objects.create(
ip_address="ntc123.local", site=self.site1, role=self.device_role1, platform=self.platform1
)
self.onboarding_task5 = OnboardingTask.objects.create(
ip_address="bad.local", site=self.site1, role=self.device_role1, platform=self.platform1
)
self.onboarding_task7 = OnboardingTask.objects.create(
ip_address="192.0.2.1/32", site=self.site1, role=self.device_role1, platform=self.platform1
)
@mock.patch("nautobot_device_onboarding.helpers.socket.gethostbyname")
def test_check_ip(self, mock_get_hostbyname):
"""Check DNS to IP address."""
# Look up response value
mock_get_hostbyname.return_value = "192.0.2.1"
# FQDN -> IP
onboarding_task_fqdn_to_ip(ot=self.onboarding_task4)
# Run the check to change the IP address
self.assertEqual(self.onboarding_task4.ip_address, "192.0.2.1")
@mock.patch("nautobot_device_onboarding.helpers.socket.gethostbyname")
def test_failed_check_ip(self, mock_get_hostbyname):
"""Check DNS to IP address failing."""
# Look up a failed response
mock_get_hostbyname.side_effect = gaierror(8)
# Check for bad.local raising an exception
with self.assertRaises(OnboardException) as exc_info:
onboarding_task_fqdn_to_ip(ot=self.onboarding_task5)
self.assertEqual(exc_info.exception.message, "ERROR failed to complete DNS lookup: bad.local")
self.assertEqual(exc_info.exception.reason, "fail-dns")
# Check for exception with prefix address entered
with self.assertRaises(OnboardException) as exc_info:
onboarding_task_fqdn_to_ip(ot=self.onboarding_task7)
self.assertEqual(exc_info.exception.reason, "fail-prefix")
self.assertEqual(exc_info.exception.message, "ERROR appears a prefix was entered: 192.0.2.1/32")
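Both tests above patch socket.gethostbyname through the module that calls it (nautobot_device_onboarding.helpers) rather than on the socket module in isolation; that is what makes the return_value and side_effect visible to onboarding_task_fqdn_to_ip(). A tiny self-contained illustration of that patching rule, with no Nautobot dependencies and purely hypothetical names:

from unittest import mock
import socket

def resolve(host):
    # Stand-in for a helper that resolves an FQDN via the socket module.
    return socket.gethostbyname(host)

# Patch the attribute where it is *used* (this module), not where it is defined.
with mock.patch(__name__ + ".socket.gethostbyname", return_value="192.0.2.1"):
    assert resolve("ntc123.local") == "192.0.2.1"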
| 44.602564 | 108 | 0.728658 | 467 | 3,479 | 5.286938 | 0.357602 | 0.036857 | 0.058323 | 0.032402 | 0.387201 | 0.334143 | 0.321993 | 0.253949 | 0.238558 | 0.238558 | 0 | 0.022456 | 0.180799 | 3,479 | 77 | 109 | 45.181818 | 0.84386 | 0.29635 | 0 | 0.108108 | 0 | 0 | 0.131263 | 0.045549 | 0 | 0 | 0 | 0 | 0.189189 | 1 | 0.081081 | false | 0 | 0.189189 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 8a076cdd50a9d69b52cffcb8dbe3df578f17d801 | 2,577 | py | Python | superneurons/tools/img_val/main.py | Phaeton-lang/baselines | 472c248047fbb55b5fa0e620758047b7f0a1d041 | ["MIT"] | null | null | null | superneurons/tools/img_val/main.py | Phaeton-lang/baselines | 472c248047fbb55b5fa0e620758047b7f0a1d041 | ["MIT"] | null | null | null | superneurons/tools/img_val/main.py | Phaeton-lang/baselines | 472c248047fbb55b5fa0e620758047b7f0a1d041 | ["MIT"] | null | null | null |
# Created by ay27 at 17/4/9
import os
import matplotlib.pyplot as plt
import struct
import numpy as np
def trans(row):
return list(map(lambda x: np.uint8(x), row))
def read_image(filename):
with open(filename, mode='rb') as file:
n = file.read(8)
n = struct.unpack("<Q", n)[0]
c = file.read(8)
c = struct.unpack("<Q", c)[0]
h = file.read(8)
h = struct.unpack("<Q", h)[0]
w = file.read(8)
w = struct.unpack("<Q", w)[0]
print(n, c, h, w)
for ii in range(n):
r = trans(file.read(h*w))
g = trans(file.read(h*w))
b = trans(file.read(h*w))
if ii == 100:
break
print(file.tell() == os.fstat(file.fileno()).st_size)
img = np.array([r,g,b]).transpose(1,0).reshape(h,w,c)
print(img.shape)
plt.imshow(img)
plt.show()
def read_label(path, ground_truth=None):
with open(path, 'rb') as file:
n = file.read(8)
n = struct.unpack("<Q", n)[0]
c = file.read(8)
c = struct.unpack("<Q", c)[0]
h = file.read(8)
h = struct.unpack("<Q", h)[0]
w = file.read(8)
w = struct.unpack("<Q", w)[0]
print(n, c, h, w)
label = []
sets = set()
while not (file.tell() == os.fstat(file.fileno()).st_size):
ch = file.read(4)
num = struct.unpack("<l", ch)[0]
label.append(num)
sets.add(num)
# print(file.tell() == os.fstat(file.fileno()).st_size)
print(label)
print(len(label))
# print(label[900],label[901], label[902], label[903], label[904])
return label
# if ground_truth:
# g = []
# with open(ground_truth) as file:
# for line in file:
# g.append(int(line.split(' ')[1]))
# np.testing.assert_array_equal(g, label)
if __name__ == '__main__':
# read_image('../../data/ilsvrc2012/img.bin')
# read_label('../../data/ilsvrc2012/label.bin', '../../data/ilsvrc2012/val.txt')
# read_image('../../build/cifar100_train_image.bin')
# read_label('../../build/cifar100_train_label.bin')
read_image('../../build/val_data_8.bin')
for i in range(10):
read_label('../../build/val_label_%d.bin' % i)
# labels = []
# for i in range(10):
# labels.append(read_label('../../build/val_label_%d.bin' % i))
#
# ground = []
# with open('../../build/shuffled_list') as file:
# ground.append()
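read_image() and read_label() above assume a simple container format: a header of four little-endian uint64 values (n, c, h, w), followed by planar per-channel pixel bytes for each image, or a run of little-endian int32 values for labels. As a hedged sketch, a writer that produces a tiny file in that image layout can be used to exercise read_image(); the output path and array contents here are made up, not part of the original tool.

import struct
import numpy as np

def write_image_bin(path, images):
    """Write a uint8 array of shape (n, c, h, w), c == 3, in the layout read_image() parses."""
    n, c, h, w = images.shape
    with open(path, 'wb') as f:
        f.write(struct.pack("<QQQQ", n, c, h, w))  # header: four little-endian uint64
        for img in images:
            for channel in range(c):  # planar layout: all R bytes, then G, then B
                f.write(img[channel].astype(np.uint8).tobytes())

if __name__ == '__main__':
    demo = np.random.randint(0, 256, size=(2, 3, 4, 4), dtype=np.uint8)
    write_image_bin('demo_images.bin', demo)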
| 28.01087 | 84 | 0.509895 | 365 | 2,577 | 3.50137 | 0.290411 | 0.075117 | 0.056338 | 0.032864 | 0.367762 | 0.312207 | 0.312207 | 0.312207 | 0.245696 | 0.189358 | 0 | 0.038376 | 0.302289 | 2,577 | 92 | 85 | 28.01087 | 0.672414 | 0.282499 | 0 | 0.339623 | 0 | 0 | 0.045927 | 0.029524 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.075472 | 0.018868 | 0.169811 | 0.113208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 8a0afacd436c5c382b382e786080775c8a2d6bf7 | 5,581 | py | Python | otp/chat/ChatInputNormal.py | P1ayerOne/src | 3a4343e29f844fe95da7d51aaee7fb680d02bf72 | ["BSD-3-Clause"] | null | null | null | otp/chat/ChatInputNormal.py | P1ayerOne/src | 3a4343e29f844fe95da7d51aaee7fb680d02bf72 | ["BSD-3-Clause"] | null | null | null | otp/chat/ChatInputNormal.py | P1ayerOne/src | 3a4343e29f844fe95da7d51aaee7fb680d02bf72 | ["BSD-3-Clause"] | null | null | null |
from direct.showbase import DirectObject
from otp.otpbase import OTPGlobals
import sys
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from otp.otpbase import OTPLocalizer
class ChatInputNormal(DirectObject.DirectObject):
ExecNamespace = None
def __init__(self, chatMgr):
self.chatMgr = chatMgr
self.normalPos = Vec3(-1.083, 0, 0.804)
self.whisperPos = Vec3(0.0, 0, 0.71)
self.whisperAvatarName = None
self.whisperAvatarId = None
self.toPlayer = 0
wantHistory = 0
if __dev__:
wantHistory = 1
self.wantHistory = base.config.GetBool('want-chat-history', wantHistory)
self.history = ['']
self.historySize = base.config.GetInt('chat-history-size', 10)
self.historyIndex = 0
return
def typeCallback(self, extraArgs):
messenger.send('enterNormalChat')
def delete(self):
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
self.chatFrame.destroy()
del self.chatFrame
del self.chatButton
del self.cancelButton
del self.chatEntry
del self.whisperLabel
del self.chatMgr
def activateByData(self, whisperAvatarId = None, toPlayer = 0):
self.toPlayer = toPlayer
self.whisperAvatarId = whisperAvatarId
self.whisperAvatarName = base.talkAssistant.findName(self.whisperAvatarId, self.toPlayer)
if self.whisperAvatarId:
self.chatFrame.setPos(self.whisperPos)
self.whisperLabel['text'] = OTPLocalizer.ChatInputWhisperLabel % self.whisperAvatarName
self.whisperLabel.show()
else:
self.chatFrame.setPos(self.normalPos)
self.whisperLabel.hide()
self.chatEntry['focus'] = 1
self.chatFrame.show()
if self.wantHistory:
self.accept('arrow_up-up', self.getPrevHistory)
self.accept('arrow_down-up', self.getNextHistory)
def deactivate(self):
self.chatEntry.set('')
self.chatEntry['focus'] = 0
self.chatFrame.hide()
self.whisperLabel.hide()
base.win.closeIme()
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
def checkForOverRide(self):
return False
def sendChat(self, text):
if self.checkForOverRide():
self.chatEntry.enterText('')
return
self.deactivate()
self.chatMgr.fsm.request('mainMenu')
if text:
if self.toPlayer:
if self.whisperAvatarId:
self.whisperAvatarName = None
self.whisperAvatarId = None
self.toPlayer = 0
elif self.whisperAvatarId:
self.chatMgr.sendWhisperString(text, self.whisperAvatarId)
self.whisperAvatarName = None
self.whisperAvatarId = None
else:
if self.chatMgr.execChat:
if text[0] == '>':
text = self.__execMessage(text[1:])
base.localAvatar.setChatAbsolute(text, CFSpeech | CFTimeout)
return
base.talkAssistant.sendOpenTalk(text)
if self.wantHistory:
self.addToHistory(text)
return
def chatOverflow(self, overflowText):
self.sendChat(self.chatEntry.get())
def __execMessage(self, message):
if not ChatInputNormal.ExecNamespace:
ChatInputNormal.ExecNamespace = {}
exec('from pandac.PandaModules import *', globals(), self.ExecNamespace)
self.importExecNamespace()
try:
if not isClient():
print('EXECWARNING ChatInputNormal eval: %s' % message)
printStack()
return str(eval(message, globals(), ChatInputNormal.ExecNamespace))
except SyntaxError:
try:
if not isClient():
print('EXECWARNING ChatInputNormal exec: %s' % message)
printStack()
exec(message, globals(), ChatInputNormal.ExecNamespace)
return 'ok'
except:
exception = sys.exc_info()[0]
extraInfo = sys.exc_info()[1]
if extraInfo:
return str(extraInfo)
else:
return str(exception)
except:
exception = sys.exc_info()[0]
extraInfo = sys.exc_info()[1]
if extraInfo:
return str(extraInfo)
else:
return str(exception)
def cancelButtonPressed(self):
self.chatEntry.set('')
self.chatMgr.fsm.request('mainMenu')
def chatButtonPressed(self):
self.sendChat(self.chatEntry.get())
def importExecNamespace(self):
pass
def addToHistory(self, text):
self.history = [text] + self.history[:self.historySize - 1]
self.historyIndex = 0
def getPrevHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex += 1
self.historyIndex %= len(self.history)
def getNextHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex -= 1
self.historyIndex %= len(self.history)
def setPos(self, posX, posY = None, posZ = None):
if posX and posY and posZ:
self.chatFrame.setPos(posX, posY, posZ)
else:
self.chatFrame.setPos(posX)
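Most of ChatInputNormal is Panda3D plumbing, but the history handling (addToHistory, getPrevHistory, getNextHistory) is a small ring buffer: the newest entry sits at index 0, the list is capped at historySize, and the cursor wraps with a modulo. A standalone sketch of that ring with a hypothetical class name and no engine dependencies:

class HistoryRing:
    """Chat-history ring: newest entry first, cursor wraps around."""
    def __init__(self, size=10):
        self.size = size
        self.history = ['']
        self.index = 0

    def add(self, text):
        # Prepend the new entry and drop anything beyond the cap.
        self.history = [text] + self.history[:self.size - 1]
        self.index = 0

    def prev(self):
        # Return the entry under the cursor, then step towards older entries.
        value = self.history[self.index]
        self.index = (self.index + 1) % len(self.history)
        return value

ring = HistoryRing()
ring.add('hello')
ring.add('world')
assert ring.prev() == 'world'  # most recent first
assert ring.prev() == 'hello'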
| 34.450617 | 99 | 0.583766 | 529 | 5,581 | 6.117202 | 0.245747 | 0.058714 | 0.035538 | 0.024722 | 0.304388 | 0.272868 | 0.237948 | 0.2089 | 0.182324 | 0.144623 | 0 | 0.009757 | 0.320552 | 5,581 | 161 | 100 | 34.664596 | 0.843618 | 0 | 0 | 0.398601 | 0 | 0 | 0.046407 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111888 | false | 0.006993 | 0.062937 | 0.006993 | 0.265734 | 0.027972 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 8a0b53a65038120d7c635ea3a3f7ba3752ca109e | 14,068 | py | Python | train_text_summarizer.py | stevaras2/bert | 1efaa300eb91dea85c40de5e1586e8d2c94b89bb | ["Apache-2.0"] | 1 | 2019-11-28T10:03:09.000Z | 2019-11-28T10:03:09.000Z | train_text_summarizer.py | stevaras2/bert | 1efaa300eb91dea85c40de5e1586e8d2c94b89bb | ["Apache-2.0"] | null | null | null | train_text_summarizer.py | stevaras2/bert | 1efaa300eb91dea85c40de5e1586e8d2c94b89bb | ["Apache-2.0"] | null | null | null |
import argparse
import json
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,f1_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
from keras.utils.vis_utils import plot_model
from sklearn.externals import joblib
import time
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def get_embeddings(sentences_list,layer_json):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:return: dictionary mapping each sentence of the sentences_list to its embedding
'''
sentences = dict()  # key: index of each line of the sentences_list.txt, value: the sentence
embeddings = dict()  # key: index of each sentence, value: its embedding
sentence_emb = dict()  # key: sentence, value: its embedding
with open(sentences_list,'r') as file:
for index,line in enumerate(file):
sentences[index] = line.strip()
with open(layer_json, 'r',encoding='utf-8') as f:
for line in f:
embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features'])
for key,value in sentences.items():
sentence_emb[value] = embeddings[key]
return sentence_emb
def train_classifier(sentences_list,layer_json,dataset_csv,filename):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:param filename: the path of the pickle file where the model will be stored
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length]) # np.append(features,length,axis=1)
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)
log.fit(X_train, y_train)
#save the model
_ = joblib.dump(log, filename, compress=9)
predictions = log.predict(X_val)
print("###########################################")
print("Results using embeddings from the",layer_json,"file")
print(classification_report(y_val, predictions))
print("F1 score using Logistic Regression:",f1_score(y_val, predictions))
print("###########################################")
#train a DNN
f1_results = list()
for i in range(3):
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
# compile network
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
# fit network
model.fit(X_train, y_train, epochs=100, batch_size=64)
loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
print('\nTest F1: %f' % (f_1 * 100))
f1_results.append(f_1)
model = None
print("###########################################")
print("Results using embeddings from the", layer_json, "file")
# evaluate
print(np.mean(f1_results))
print("###########################################")
def parameter_tuning_LR(sentences_list,layer_json,dataset_csv):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
C = [0.1,1,2,5,10]
solver = ['newton-cg','saga','sag']
best_params = dict()
best_score = 0.0
for c in C:
for s in solver:
start = time.time()
log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c)
log.fit(X_train, y_train)
predictions = log.predict(X_val)
print("###########################################")
print("LR with C =",c,'and solver = ',s)
print("Results using embeddings from the", layer_json, "file")
print(classification_report(y_val, predictions))
f1 = f1_score(y_val, predictions)
if f1 > best_score:
best_score = f1
best_params['c'] = c
best_params['solver'] = s
print("F1 score using Logistic Regression:",f1)
print("###########################################")
end = time.time()
running_time = end - start
print("Running time:"+str(running_time))
def visualize_DNN(file_to_save):
'''
Save the DNN architecture to a png file. Better to use the Visulize_DNN.ipynd notebook instead.
:param file_to_save: the png file that the architecture of the DNN will be saved.
:return: None
'''
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
plot_model(model, to_file=file_to_save, show_shapes=True)
def save_model(sentences_list,layer_json,dataset_csv,pkl):
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list, layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
log = LogisticRegression(random_state=0, solver='saga', max_iter=1000, C=1)
log.fit(features, label)
_ = joblib.dump(log, pkl, compress=9)
if __name__ == '__main__':
#save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','summarizer1.pkl')
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--sentences", required=True, help="sentences list")
ap.add_argument("-o", "--output", required=True, help="output")
ap.add_argument("-ts", "--train set", required=True, help="path to train set")
ap.add_argument("-sp", "--summarizer path", required=True, help="path to save summarizer")
args = vars(ap.parse_args())
layer = train_classifier(args['sentences'], args['output'], args['train set'],args['summarizer path'])
#layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl')
#layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl')
#layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl')
#layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl')
#tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv')
#layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
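get_embeddings() above pairs line i of the sentences file with the JSON-lines record whose linex_index equals i, which is the layout written by BERT extract_features-style scripts. The following self-contained sketch builds two tiny matching inputs and reproduces that pairing; the file names and the 3-dimensional vectors are placeholders, not real BERT features.

import json
import numpy as np

# Build the two inputs: one sentence per line, one JSON record per line.
with open('sentences_demo.txt', 'w') as f:
    f.write('First sentence.\nSecond sentence.\n')
with open('layer_demo.json', 'w') as f:
    f.write(json.dumps({'linex_index': 0, 'features': [0.1, 0.2, 0.3]}) + '\n')
    f.write(json.dumps({'linex_index': 1, 'features': [0.4, 0.5, 0.6]}) + '\n')

# Pairing logic equivalent to get_embeddings(): line i <-> record with linex_index == i.
with open('sentences_demo.txt') as f:
    sentences = {i: line.strip() for i, line in enumerate(f)}
with open('layer_demo.json') as f:
    embeddings = {json.loads(line)['linex_index']: np.asarray(json.loads(line)['features']) for line in f}
sentence_emb = {sentence: embeddings[i] for i, sentence in sentences.items()}
assert sentence_emb['Second sentence.'].shape == (3,)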
| 35.705584 | 151 | 0.649062 | 1,890 | 14,068 | 4.632275 | 0.12963 | 0.033923 | 0.013706 | 0.02193 | 0.710565 | 0.68578 | 0.653341 | 0.62924 | 0.583895 | 0.57076 | 0 | 0.020858 | 0.216164 | 14,068 | 393 | 152 | 35.796438 | 0.773102 | 0.201521 | 0 | 0.684015 | 0 | 0 | 0.070995 | 0.023393 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02974 | false | 0 | 0.052045 | 0 | 0.096654 | 0.152416 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 8a0d48bd45e2a77d4024e66ae20d64213df72227 | 1,493 | py | Python | src/test/python/apache/aurora/executor/test_status_manager.py | zmanji/incubator-aurora | 9f594f1de6bbf46c74863dd3fc4d2708b7a974f2 | ["Apache-2.0"] | null | null | null | src/test/python/apache/aurora/executor/test_status_manager.py | zmanji/incubator-aurora | 9f594f1de6bbf46c74863dd3fc4d2708b7a974f2 | ["Apache-2.0"] | null | null | null | src/test/python/apache/aurora/executor/test_status_manager.py | zmanji/incubator-aurora | 9f594f1de6bbf46c74863dd3fc4d2708b7a974f2 | ["Apache-2.0"] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from unittest import TestCase
import mock
from mesos.interface.mesos_pb2 import TaskState
from apache.aurora.executor.common.status_checker import StatusChecker
from apache.aurora.executor.status_manager import StatusManager
class FakeStatusChecker(StatusChecker):
def __init__(self):
self.call_count = 0
@property
def status(self):
if self.call_count == 2:
return TaskState.Value('TASK_KILLED')
self.call_count += 1
return None
class TestStatusManager(TestCase):
def setUp(self):
self.callback_called = False
def test_run(self):
checker = FakeStatusChecker()
def callback(result):
assert result == TaskState.Value('TASK_KILLED')
self.callback_called = True
mock_time = mock.Mock(spec=time)
status_manager = StatusManager(checker, callback, mock_time)
status_manager.run()
assert mock_time.sleep.call_count == 2
assert self.callback_called is True
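The test above passes because FakeStatusChecker reports TASK_KILLED on its third poll and the mock.Mock(spec=time) clock makes sleep() return immediately, so the manager's polling loop finishes after exactly two sleeps. The loop being exercised presumably looks roughly like the sketch below; this is an assumption for illustration, not StatusManager's actual source, and the names are hypothetical.

import time as real_time

def poll_until_status(checker, callback, clock=real_time, interval=0.1):
    """Poll checker.status until it is not None, then report it exactly once."""
    while True:
        status = checker.status
        if status is not None:
            callback(status)
            return
        clock.sleep(interval)  # a mocked clock makes this return instantly in tests

class CountingChecker:
    def __init__(self):
        self.calls = 0

    @property
    def status(self):
        self.calls += 1
        return 'TASK_KILLED' if self.calls > 2 else None

results = []
poll_until_status(CountingChecker(), results.append, interval=0)
assert results == ['TASK_KILLED']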
| 29.27451 | 74 | 0.750167 | 206 | 1,493 | 5.330097 | 0.504854 | 0.054645 | 0.035519 | 0.029144 | 0.051002 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007293 | 0.173476 | 1,493 | 50 | 75 | 29.86 | 0.882496 | 0.348962 | 0 | 0 | 0 | 0 | 0.022965 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 1 | 0.178571 | false | 0 | 0.214286 | 0 | 0.535714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 8a0d98e91f0c9a170743b5f41866a399dbce8684 | 3,494 | py | Python | Supplemental/A5. Collision estimation module/Con_est.py | wangqf1997/Human-injury-based-safety-decision-of-automated-vehicles | b104fdeb3d85e867f6b04c5ae7b5a197e705aeba | ["CC-BY-4.0"] | null | null | null | Supplemental/A5. Collision estimation module/Con_est.py | wangqf1997/Human-injury-based-safety-decision-of-automated-vehicles | b104fdeb3d85e867f6b04c5ae7b5a197e705aeba | ["CC-BY-4.0"] | null | null | null | Supplemental/A5. Collision estimation module/Con_est.py | wangqf1997/Human-injury-based-safety-decision-of-automated-vehicles | b104fdeb3d85e867f6b04c5ae7b5a197e705aeba | ["CC-BY-4.0"] | null | null | null |
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: Qingfan Wang, Qing Zhou, Miao Lin, Bingbing Nie
Corresponding author: Bingbing Nie ([email protected])
-------------------------------------------------------------------------------------------------
'''
import torch
import numpy as np
from torch import nn
from torch.nn.utils import weight_norm
__author__ = "Qingfan Wang"
def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param):
''' Estimate the collision condition. '''
(veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))
if -1e-6 < delta_angle_2 < 1e-6:
delta_angle_2 = 1e-6
delta_v1_list = []
delta_v2_list = []
# Estimate the collision condition (delta-v) according to the principal impact direction.
for veh_striking in veh_striking_list:
if veh_striking[0] == 1:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgs[1] - veh_striking[3])
veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v)
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 2:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgf[1] - veh_striking[3])
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V1_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
elif veh_striking[0] == 3:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgs[0] - veh_striking[3])
veh_RDS = np.abs(V2_v * np.cos(delta_angle) - V1_v)
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 4:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgf[0] - veh_striking[3])
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V2_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
# Obtain delta-v based on the plane 2-DOF rigid-body collision model with momentum conservation.
veh_y1 = veh_k[0] ** 2 / (veh_a1 ** 2 + veh_k[0] ** 2)
veh_y2 = veh_k[1] ** 2 / (veh_a2 ** 2 + veh_k[1] ** 2)
delta_v1 = (1 + veh_e) * veh_m[1] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v2 = (1 + veh_e) * veh_m[0] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v1_list.append(delta_v1)
delta_v2_list.append(delta_v2)
delta_v1_ = max(delta_v1_list)
delta_v2_ = max(delta_v2_list)
index = delta_v1_list.index(max(delta_v1_list))
return delta_v1_, delta_v2_, index
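Restated as equations (a direct transcription of the code above, not an independent derivation): with the effective-mass factors

\gamma_i = \frac{k_i^2}{a_i^2 + k_i^2}, \qquad i \in \{1, 2\},

the per-vehicle velocity changes follow from the planar momentum-conservation model as

\Delta v_1 = \frac{(1 + e)\, m_2\, \gamma_1 \gamma_2\, v_{RDS}}{m_1 \gamma_1 + m_2 \gamma_2}, \qquad \Delta v_2 = \frac{(1 + e)\, m_1\, \gamma_1 \gamma_2\, v_{RDS}}{m_1 \gamma_1 + m_2 \gamma_2},

where m_i are the vehicle masses (veh_m), k_i the veh_k parameters, a_i the moment arms computed in each striking-configuration branch, e the restitution-like factor veh_e, and v_RDS the relative closing speed veh_RDS. The function keeps the maximum delta-v over all candidate striking configurations and returns the index of that configuration.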
| 43.135802 | 117 | 0.556955 | 581 | 3,494 | 3.061962 | 0.189329 | 0.040472 | 0.055649 | 0.042159 | 0.539629 | 0.470489 | 0.467678 | 0.432827 | 0.412591 | 0.412591 | 0 | 0.070189 | 0.257871 | 3,494 | 81 | 118 | 43.135802 | 0.615889 | 0.178878 | 0 | 0.264151 | 0 | 0 | 0.004203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.075472 | 0 | 0.113208 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |