hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
8a0ead4871ddc6b047237522f5f34d4d48742f52
11,790
py
Python
train/train.py
TontonTremblay/pixel-nerf
349b5f3f173cd76def05b6de8aa52c69a4f0c7fa
[ "BSD-2-Clause" ]
null
null
null
train/train.py
TontonTremblay/pixel-nerf
349b5f3f173cd76def05b6de8aa52c69a4f0c7fa
[ "BSD-2-Clause" ]
null
null
null
train/train.py
TontonTremblay/pixel-nerf
349b5f3f173cd76def05b6de8aa52c69a4f0c7fa
[ "BSD-2-Clause" ]
null
null
null
# Training to a set of multiple objects (e.g. ShapeNet or DTU) # tensorboard logs available in logs/<expname> import sys import os sys.path.insert( 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")) ) import warnings import trainlib from model import make_model, loss from render import NeRFRenderer from data import get_split_dataset import util import numpy as np import torch.nn.functional as F import torch from dotmap import DotMap def extra_args(parser): parser.add_argument( "--batch_size", "-B", type=int, default=4, help="Object batch size ('SB')" ) parser.add_argument( "--nviews", "-V", type=str, default="1", help="Number of source views (multiview); put multiple (space delim) to pick randomly per batch ('NV')", ) parser.add_argument( "--freeze_enc", action="store_true", default=None, help="Freeze encoder weights and only train MLP", ) parser.add_argument( "--no_bbox_step", type=int, default=100000, help="Step to stop using bbox sampling", ) parser.add_argument( "--fixed_test", action="store_true", default=None, help="Freeze encoder weights and only train MLP", ) return parser args, conf = util.args.parse_args(extra_args, training=True, default_ray_batch_size=128) device = util.get_cuda(args.gpu_id[0]) dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir) print( "dset z_near {}, z_far {}, lindisp {}".format(dset.z_near, dset.z_far, dset.lindisp) ) net = make_model(conf["model"]).to(device=device) net.stop_encoder_grad = args.freeze_enc if args.freeze_enc: print("Encoder frozen") net.encoder.eval() renderer = NeRFRenderer.from_conf(conf["renderer"], lindisp=dset.lindisp,).to( device=device ) # Parallize render_par = renderer.bind_parallel(net, args.gpu_id).eval() nviews = list(map(int, args.nviews.split())) class PixelNeRFTrainer(trainlib.Trainer): def __init__(self): super().__init__(net, dset, val_dset, args, conf["train"], device=device) self.renderer_state_path = "%s/%s/_renderer" % ( self.args.checkpoints_path, self.args.name, ) self.lambda_coarse = conf.get_float("loss.lambda_coarse") self.lambda_fine = conf.get_float("loss.lambda_fine", 1.0) print( "lambda coarse {} and fine {}".format(self.lambda_coarse, self.lambda_fine) ) self.rgb_coarse_crit = loss.get_rgb_loss(conf["loss.rgb"], True) fine_loss_conf = conf["loss.rgb"] if "rgb_fine" in conf["loss"]: print("using fine loss") fine_loss_conf = conf["loss.rgb_fine"] self.rgb_fine_crit = loss.get_rgb_loss(fine_loss_conf, False) if args.resume: if os.path.exists(self.renderer_state_path): renderer.load_state_dict( torch.load(self.renderer_state_path, map_location=device) ) self.z_near = dset.z_near self.z_far = dset.z_far self.use_bbox = args.no_bbox_step > 0 def post_batch(self, epoch, batch): renderer.sched_step(args.batch_size) def extra_save_state(self): torch.save(renderer.state_dict(), self.renderer_state_path) def calc_losses(self, data, is_train=True, global_step=0): if "images" not in data: return {} all_images = data["images"].to(device=device) # (SB, NV, 3, H, W) SB, NV, _, H, W = all_images.shape all_poses = data["poses"].to(device=device) # (SB, NV, 4, 4) all_bboxes = data.get("bbox") # (SB, NV, 4) cmin rmin cmax rmax all_focals = data["focal"] # (SB) all_c = data.get("c") # (SB) if self.use_bbox and global_step >= args.no_bbox_step: self.use_bbox = False print(">>> Stopped using bbox sampling @ iter", global_step) if not is_train or not self.use_bbox: all_bboxes = None all_rgb_gt = [] all_rays = [] curr_nviews = nviews[torch.randint(0, len(nviews), ()).item()] if curr_nviews == 1: image_ord = torch.randint(0, NV, (SB, 1)) else: image_ord = torch.empty((SB, curr_nviews), dtype=torch.long) for obj_idx in range(SB): if all_bboxes is not None: bboxes = all_bboxes[obj_idx] images = all_images[obj_idx] # (NV, 3, H, W) poses = all_poses[obj_idx] # (NV, 4, 4) focal = all_focals[obj_idx] c = None if "c" in data: c = data["c"][obj_idx] if curr_nviews > 1: # Somewhat inefficient, don't know better way image_ord[obj_idx] = torch.from_numpy( np.random.choice(NV, curr_nviews, replace=False) ) images_0to1 = images * 0.5 + 0.5 cam_rays = util.gen_rays( poses, W, H, focal, self.z_near, self.z_far, c=c ) # (NV, H, W, 8) rgb_gt_all = images_0to1 rgb_gt_all = ( rgb_gt_all.permute(0, 2, 3, 1).contiguous().reshape(-1, 3) ) # (NV, H, W, 3) if all_bboxes is not None: pix = util.bbox_sample(bboxes, args.ray_batch_size) pix_inds = pix[..., 0] * H * W + pix[..., 1] * W + pix[..., 2] else: pix_inds = torch.randint(0, NV * H * W, (args.ray_batch_size,)) rgb_gt = rgb_gt_all[pix_inds] # (ray_batch_size, 3) rays = cam_rays.view(-1, cam_rays.shape[-1])[pix_inds].to( device=device ) # (ray_batch_size, 8) all_rgb_gt.append(rgb_gt) all_rays.append(rays) all_rgb_gt = torch.stack(all_rgb_gt) # (SB, ray_batch_size, 3) all_rays = torch.stack(all_rays) # (SB, ray_batch_size, 8) image_ord = image_ord.to(device) src_images = util.batched_index_select_nd( all_images, image_ord ) # (SB, NS, 3, H, W) src_poses = util.batched_index_select_nd(all_poses, image_ord) # (SB, NS, 4, 4) all_bboxes = all_poses = all_images = None net.encode( src_images, src_poses, all_focals.to(device=device), c=all_c.to(device=device) if all_c is not None else None, ) render_dict = DotMap(render_par(all_rays, want_weights=True,)) coarse = render_dict.coarse fine = render_dict.fine using_fine = len(fine) > 0 loss_dict = {} rgb_loss = self.rgb_coarse_crit(coarse.rgb, all_rgb_gt) if rgb_loss.isnan().any().item()==True: raise() loss_dict["rc"] = rgb_loss.item() * self.lambda_coarse if using_fine: fine_loss = self.rgb_fine_crit(fine.rgb, all_rgb_gt) rgb_loss = rgb_loss * self.lambda_coarse + fine_loss * self.lambda_fine loss_dict["rf"] = fine_loss.item() * self.lambda_fine loss = rgb_loss if is_train: loss.backward() loss_dict["t"] = loss.item() return loss_dict def train_step(self, data, global_step): return self.calc_losses(data, is_train=True, global_step=global_step) def eval_step(self, data, global_step): renderer.eval() losses = self.calc_losses(data, is_train=False, global_step=global_step) renderer.train() return losses def vis_step(self, data, global_step, idx=None): if "images" not in data: return {} if idx is None: batch_idx = np.random.randint(0, data["images"].shape[0]) else: print(idx) batch_idx = idx images = data["images"][batch_idx].to(device=device) # (NV, 3, H, W) poses = data["poses"][batch_idx].to(device=device) # (NV, 4, 4) focal = data["focal"][batch_idx : batch_idx + 1] # (1) c = data.get("c") if c is not None: c = c[batch_idx : batch_idx + 1] # (1) NV, _, H, W = images.shape cam_rays = util.gen_rays( poses, W, H, focal, self.z_near, self.z_far, c=c ) # (NV, H, W, 8) images_0to1 = images * 0.5 + 0.5 # (NV, 3, H, W) curr_nviews = nviews[torch.randint(0, len(nviews), (1,)).item()] views_src = np.sort(np.random.choice(NV, curr_nviews, replace=False)) view_dest = np.random.randint(0, NV - curr_nviews) for vs in range(curr_nviews): view_dest += view_dest >= views_src[vs] views_src = torch.from_numpy(views_src) # set renderer net to eval mode renderer.eval() source_views = ( images_0to1[views_src] .permute(0, 2, 3, 1) .cpu() .numpy() .reshape(-1, H, W, 3) ) gt = images_0to1[view_dest].permute(1, 2, 0).cpu().numpy().reshape(H, W, 3) with torch.no_grad(): test_rays = cam_rays[view_dest] # (H, W, 8) test_images = images[views_src] # (NS, 3, H, W) net.encode( test_images.unsqueeze(0), poses[views_src].unsqueeze(0), focal.to(device=device), c=c.to(device=device) if c is not None else None, ) test_rays = test_rays.reshape(1, H * W, -1) render_dict = DotMap(render_par(test_rays, want_weights=True)) coarse = render_dict.coarse fine = render_dict.fine using_fine = len(fine) > 0 alpha_coarse_np = coarse.weights[0].sum(dim=-1).cpu().numpy().reshape(H, W) rgb_coarse_np = coarse.rgb[0].cpu().numpy().reshape(H, W, 3) depth_coarse_np = coarse.depth[0].cpu().numpy().reshape(H, W) if using_fine: alpha_fine_np = fine.weights[0].sum(dim=1).cpu().numpy().reshape(H, W) depth_fine_np = fine.depth[0].cpu().numpy().reshape(H, W) rgb_fine_np = fine.rgb[0].cpu().numpy().reshape(H, W, 3) print("c rgb min {} max {}".format(rgb_coarse_np.min(), rgb_coarse_np.max())) print( "c alpha min {}, max {}".format( alpha_coarse_np.min(), alpha_coarse_np.max() ) ) alpha_coarse_cmap = util.cmap(alpha_coarse_np) / 255 depth_coarse_cmap = util.cmap(depth_coarse_np) / 255 vis_list = [ *source_views, gt, depth_coarse_cmap, rgb_coarse_np, alpha_coarse_cmap, ] vis_coarse = np.hstack(vis_list) vis = vis_coarse if using_fine: print("f rgb min {} max {}".format(rgb_fine_np.min(), rgb_fine_np.max())) print( "f alpha min {}, max {}".format( alpha_fine_np.min(), alpha_fine_np.max() ) ) depth_fine_cmap = util.cmap(depth_fine_np) / 255 alpha_fine_cmap = util.cmap(alpha_fine_np) / 255 vis_list = [ *source_views, gt, depth_fine_cmap, rgb_fine_np, alpha_fine_cmap, ] vis_fine = np.hstack(vis_list) vis = np.vstack((vis_coarse, vis_fine)) rgb_psnr = rgb_fine_np else: rgb_psnr = rgb_coarse_np psnr = util.psnr(rgb_psnr, gt) vals = {"psnr": psnr} print("psnr", psnr) # set the renderer network back to train mode renderer.train() return vis, vals trainer = PixelNeRFTrainer() trainer.start()
33.976945
112
0.566073
1,605
11,790
3.917134
0.160748
0.007317
0.024495
0.017815
0.271831
0.193415
0.129633
0.112931
0.072213
0.072213
0
0.015012
0.310687
11,790
346
113
34.075145
0.758583
0.048176
0
0.190813
0
0.003534
0.063796
0
0
0
0
0
0
1
0.028269
false
0
0.042403
0.003534
0.09894
0.038869
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a10a1ae5c36176cfdd1c3ad55656efe8325a99f
20,351
py
Python
napari/_qt/dialogs/qt_plugin_dialog.py
kne42/napari
d61d0be0ef8ea622dd3d6acd270c0529816c11ec
[ "BSD-3-Clause" ]
null
null
null
napari/_qt/dialogs/qt_plugin_dialog.py
kne42/napari
d61d0be0ef8ea622dd3d6acd270c0529816c11ec
[ "BSD-3-Clause" ]
null
null
null
napari/_qt/dialogs/qt_plugin_dialog.py
kne42/napari
d61d0be0ef8ea622dd3d6acd270c0529816c11ec
[ "BSD-3-Clause" ]
null
null
null
import os import sys from pathlib import Path from typing import Sequence from napari_plugin_engine.dist import standard_metadata from napari_plugin_engine.exceptions import PluginError from qtpy.QtCore import QEvent, QProcess, QProcessEnvironment, QSize, Qt, Slot from qtpy.QtGui import QFont, QMovie from qtpy.QtWidgets import ( QCheckBox, QDialog, QFrame, QHBoxLayout, QLabel, QLineEdit, QListWidget, QListWidgetItem, QPushButton, QSizePolicy, QSplitter, QTextEdit, QVBoxLayout, QWidget, ) import napari.resources from ...plugins import plugin_manager from ...plugins.pypi import ( ProjectInfo, iter_napari_plugin_info, normalized_name, ) from ...utils._appdirs import user_plugin_dir, user_site_packages from ...utils.misc import parse_version, running_as_bundled_app from ...utils.translations import trans from ..qthreading import create_worker from ..widgets.qt_eliding_label import ElidingLabel from ..widgets.qt_plugin_sorter import QtPluginSorter from .qt_plugin_report import QtPluginErrReporter # TODO: add error icon and handle pip install errors # TODO: add queue to handle clicks when already processing class Installer: def __init__(self, output_widget: QTextEdit = None): from ...plugins import plugin_manager # create install process self._output_widget = None self.process = QProcess() self.process.setProgram(sys.executable) self.process.setProcessChannelMode(QProcess.MergedChannels) self.process.readyReadStandardOutput.connect(self._on_stdout_ready) # setup process path env = QProcessEnvironment() combined_paths = os.pathsep.join( [user_site_packages(), env.systemEnvironment().value("PYTHONPATH")] ) env.insert("PYTHONPATH", combined_paths) # use path of parent process env.insert( "PATH", QProcessEnvironment.systemEnvironment().value("PATH") ) self.process.setProcessEnvironment(env) self.process.finished.connect(lambda: plugin_manager.discover()) self.process.finished.connect(lambda: plugin_manager.prune()) self.set_output_widget(output_widget) def set_output_widget(self, output_widget: QTextEdit): if output_widget: self._output_widget = output_widget self.process.setParent(output_widget) def _on_stdout_ready(self): if self._output_widget: text = self.process.readAllStandardOutput().data().decode() self._output_widget.append(text) def install(self, pkg_list: Sequence[str]): cmd = ['-m', 'pip', 'install', '--upgrade'] if running_as_bundled_app() and sys.platform.startswith('linux'): cmd += [ '--no-warn-script-location', '--prefix', user_plugin_dir(), ] self.process.setArguments(cmd + list(pkg_list)) if self._output_widget: self._output_widget.clear() self.process.start() def uninstall(self, pkg_list: Sequence[str]): args = ['-m', 'pip', 'uninstall', '-y'] self.process.setArguments(args + list(pkg_list)) if self._output_widget: self._output_widget.clear() self.process.start() for pkg in pkg_list: plugin_manager.unregister(pkg) class PluginListItem(QFrame): def __init__( self, package_name: str, version: str = '', url: str = '', summary: str = '', author: str = '', license: str = "UNKNOWN", *, plugin_name: str = None, parent: QWidget = None, enabled: bool = True, ): super().__init__(parent) self.setup_ui(enabled) if plugin_name: self.plugin_name.setText(plugin_name) self.package_name.setText(f"{package_name} {version}") self.summary.setText(summary) self.package_author.setText(author) self.action_button.setText(trans._("uninstall")) self.action_button.setObjectName("remove_button") self.enabled_checkbox.setChecked(enabled) if PluginError.get(plugin_name=plugin_name): def _show_error(): rep = QtPluginErrReporter( parent=self._get_dialog(), initial_plugin=plugin_name ) rep.setWindowFlags(Qt.Sheet) close = QPushButton(trans._("close"), rep) rep.layout.addWidget(close) rep.plugin_combo.hide() close.clicked.connect(rep.close) rep.open() self.error_indicator.clicked.connect(_show_error) self.error_indicator.show() self.summary.setIndent(18) else: self.summary.setIndent(38) else: self.plugin_name.setText(package_name) self.package_name.setText(version) self.summary.setText(summary) self.package_author.setText(author) self.action_button.setText(trans._("install")) self.enabled_checkbox.hide() def _get_dialog(self) -> QDialog: p = self.parent() while not isinstance(p, QDialog) and p.parent(): p = p.parent() return p def setup_ui(self, enabled=True): self.v_lay = QVBoxLayout(self) self.v_lay.setContentsMargins(-1, 6, -1, 6) self.v_lay.setSpacing(0) self.row1 = QHBoxLayout() self.row1.setSpacing(6) self.enabled_checkbox = QCheckBox(self) self.enabled_checkbox.setChecked(enabled) self.enabled_checkbox.stateChanged.connect(self._on_enabled_checkbox) self.enabled_checkbox.setToolTip(trans._("enable/disable")) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.enabled_checkbox.sizePolicy().hasHeightForWidth() ) self.enabled_checkbox.setSizePolicy(sizePolicy) self.enabled_checkbox.setMinimumSize(QSize(20, 0)) self.enabled_checkbox.setText("") self.row1.addWidget(self.enabled_checkbox) self.plugin_name = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.plugin_name.sizePolicy().hasHeightForWidth() ) self.plugin_name.setSizePolicy(sizePolicy) font15 = QFont() font15.setPointSize(15) self.plugin_name.setFont(font15) self.row1.addWidget(self.plugin_name) self.package_name = QLabel(self) self.package_name.setAlignment( Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter ) self.row1.addWidget(self.package_name) self.action_button = QPushButton(self) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.action_button.sizePolicy().hasHeightForWidth() ) self.action_button.setSizePolicy(sizePolicy) self.row1.addWidget(self.action_button) self.v_lay.addLayout(self.row1) self.row2 = QHBoxLayout() self.error_indicator = QPushButton() self.error_indicator.setObjectName("warning_icon") self.error_indicator.setCursor(Qt.PointingHandCursor) self.error_indicator.hide() self.row2.addWidget(self.error_indicator) self.row2.setContentsMargins(-1, 4, 0, -1) self.summary = ElidingLabel(parent=self) sizePolicy = QSizePolicy( QSizePolicy.MinimumExpanding, QSizePolicy.Preferred ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.summary.sizePolicy().hasHeightForWidth() ) self.summary.setSizePolicy(sizePolicy) self.summary.setObjectName("small_text") self.row2.addWidget(self.summary) self.package_author = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.package_author.sizePolicy().hasHeightForWidth() ) self.package_author.setSizePolicy(sizePolicy) self.package_author.setObjectName("small_text") self.row2.addWidget(self.package_author) self.v_lay.addLayout(self.row2) def _on_enabled_checkbox(self, state: int): """Called with `state` when checkbox is clicked.""" plugin_manager.set_blocked(self.plugin_name.text(), not state) class QPluginList(QListWidget): def __init__(self, parent: QWidget, installer: Installer): super().__init__(parent) self.installer = installer self.setSortingEnabled(True) @Slot(ProjectInfo) def addItem( self, project_info: ProjectInfo, plugin_name=None, enabled=True ): # don't add duplicates if ( self.findItems(project_info.name, Qt.MatchFixedString) and not plugin_name ): return # including summary here for sake of filtering below. searchable_text = project_info.name + " " + project_info.summary item = QListWidgetItem(searchable_text, parent=self) item.version = project_info.version super().addItem(item) widg = PluginListItem( *project_info, parent=self, plugin_name=plugin_name, enabled=enabled, ) method = getattr( self.installer, 'uninstall' if plugin_name else 'install' ) widg.action_button.clicked.connect(lambda: method([project_info.name])) item.setSizeHint(widg.sizeHint()) self.setItemWidget(item, widg) @Slot(ProjectInfo) def tag_outdated(self, project_info: ProjectInfo): for item in self.findItems(project_info.name, Qt.MatchFixedString): current = item.version latest = project_info.version if parse_version(current) >= parse_version(latest): continue if hasattr(item, 'outdated'): # already tagged it continue item.outdated = True widg = self.itemWidget(item) update_btn = QPushButton( trans._("update (v{latest})", latest=latest), widg ) update_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) update_btn.clicked.connect( lambda: self.installer.install([item.text()]) ) widg.row1.insertWidget(3, update_btn) def filter(self, text: str): """Filter items to those containing `text`.""" shown = self.findItems(text, Qt.MatchContains) for i in range(self.count()): item = self.item(i) item.setHidden(item not in shown) class QtPluginDialog(QDialog): def __init__(self, parent=None): super().__init__(parent) self.installer = Installer() self.setup_ui() self.installer.set_output_widget(self.stdout_text) self.installer.process.started.connect(self._on_installer_start) self.installer.process.finished.connect(self._on_installer_done) self.refresh() def _on_installer_start(self): self.show_status_btn.setChecked(True) self.working_indicator.show() self.process_error_indicator.hide() def _on_installer_done(self, exit_code, exit_status): self.working_indicator.hide() if exit_code: self.process_error_indicator.show() else: self.show_status_btn.setChecked(False) self.refresh() self.plugin_sorter.refresh() def refresh(self): self.installed_list.clear() self.available_list.clear() # fetch installed from ...plugins import plugin_manager plugin_manager.discover() # since they might not be loaded yet already_installed = set() for plugin_name, mod_name, distname in plugin_manager.iter_available(): # not showing these in the plugin dialog if plugin_name in ('napari_plugin_engine',): continue if distname: already_installed.add(distname) meta = standard_metadata(distname) else: meta = {} self.installed_list.addItem( ProjectInfo( normalized_name(distname or ''), meta.get('version', ''), meta.get('url', ''), meta.get('summary', ''), meta.get('author', ''), meta.get('license', ''), ), plugin_name=plugin_name, enabled=plugin_name in plugin_manager.plugins, ) # self.v_splitter.setSizes([70 * self.installed_list.count(), 10, 10]) # fetch available plugins self.worker = create_worker(iter_napari_plugin_info) def _handle_yield(project_info): if project_info.name in already_installed: self.installed_list.tag_outdated(project_info) else: self.available_list.addItem(project_info) self.worker.yielded.connect(_handle_yield) self.worker.finished.connect(self.working_indicator.hide) self.worker.finished.connect(self._update_count_in_label) self.worker.start() def setup_ui(self): self.resize(1080, 640) vlay_1 = QVBoxLayout(self) self.h_splitter = QSplitter(self) vlay_1.addWidget(self.h_splitter) self.h_splitter.setOrientation(Qt.Horizontal) self.v_splitter = QSplitter(self.h_splitter) self.v_splitter.setOrientation(Qt.Vertical) self.v_splitter.setMinimumWidth(500) self.plugin_sorter = QtPluginSorter(parent=self.h_splitter) self.plugin_sorter.layout().setContentsMargins(2, 0, 0, 0) self.plugin_sorter.hide() installed = QWidget(self.v_splitter) lay = QVBoxLayout(installed) lay.setContentsMargins(0, 2, 0, 2) self.installed_label = QLabel(trans._("Installed Plugins")) self.installed_filter = QLineEdit() self.installed_filter.setPlaceholderText("search...") self.installed_filter.setMaximumWidth(350) self.installed_filter.setClearButtonEnabled(True) mid_layout = QHBoxLayout() mid_layout.addWidget(self.installed_label) mid_layout.addWidget(self.installed_filter) mid_layout.addStretch() lay.addLayout(mid_layout) self.installed_list = QPluginList(installed, self.installer) self.installed_filter.textChanged.connect(self.installed_list.filter) lay.addWidget(self.installed_list) uninstalled = QWidget(self.v_splitter) lay = QVBoxLayout(uninstalled) lay.setContentsMargins(0, 2, 0, 2) self.avail_label = QLabel(trans._("Available Plugins")) self.avail_filter = QLineEdit() self.avail_filter.setPlaceholderText("search...") self.avail_filter.setMaximumWidth(350) self.avail_filter.setClearButtonEnabled(True) mid_layout = QHBoxLayout() mid_layout.addWidget(self.avail_label) mid_layout.addWidget(self.avail_filter) mid_layout.addStretch() lay.addLayout(mid_layout) self.available_list = QPluginList(uninstalled, self.installer) self.avail_filter.textChanged.connect(self.available_list.filter) lay.addWidget(self.available_list) self.stdout_text = QTextEdit(self.v_splitter) self.stdout_text.setReadOnly(True) self.stdout_text.setObjectName("pip_install_status") self.stdout_text.hide() buttonBox = QHBoxLayout() self.working_indicator = QLabel(trans._("loading ..."), self) sp = self.working_indicator.sizePolicy() sp.setRetainSizeWhenHidden(True) self.working_indicator.setSizePolicy(sp) self.process_error_indicator = QLabel(self) self.process_error_indicator.setObjectName("error_label") self.process_error_indicator.hide() load_gif = str(Path(napari.resources.__file__).parent / "loading.gif") mov = QMovie(load_gif) mov.setScaledSize(QSize(18, 18)) self.working_indicator.setMovie(mov) mov.start() self.direct_entry_edit = QLineEdit(self) self.direct_entry_edit.installEventFilter(self) self.direct_entry_edit.setPlaceholderText( trans._('install by name/url, or drop file...') ) self.direct_entry_btn = QPushButton(trans._("Install"), self) self.direct_entry_btn.clicked.connect(self._install_packages) self.show_status_btn = QPushButton(trans._("Show Status"), self) self.show_status_btn.setFixedWidth(100) self.show_sorter_btn = QPushButton(trans._("<< Show Sorter"), self) self.close_btn = QPushButton(trans._("Close"), self) self.close_btn.clicked.connect(self.accept) buttonBox.addWidget(self.show_status_btn) buttonBox.addWidget(self.working_indicator) buttonBox.addWidget(self.direct_entry_edit) buttonBox.addWidget(self.direct_entry_btn) buttonBox.addWidget(self.process_error_indicator) buttonBox.addSpacing(60) buttonBox.addWidget(self.show_sorter_btn) buttonBox.addWidget(self.close_btn) buttonBox.setContentsMargins(0, 0, 4, 0) vlay_1.addLayout(buttonBox) self.show_status_btn.setCheckable(True) self.show_status_btn.setChecked(False) self.show_status_btn.toggled.connect(self._toggle_status) self.show_sorter_btn.setCheckable(True) self.show_sorter_btn.setChecked(False) self.show_sorter_btn.toggled.connect(self._toggle_sorter) self.v_splitter.setStretchFactor(1, 2) self.h_splitter.setStretchFactor(0, 2) self.avail_filter.setFocus() def _update_count_in_label(self): count = self.available_list.count() self.avail_label.setText( trans._("Available Plugins ({count})", count=count) ) def eventFilter(self, watched, event): if event.type() == QEvent.DragEnter: # we need to accept this event explicitly to be able # to receive QDropEvents! event.accept() if event.type() == QEvent.Drop: md = event.mimeData() if md.hasUrls(): files = [url.toLocalFile() for url in md.urls()] self.direct_entry_edit.setText(files[0]) return True return super().eventFilter(watched, event) def _toggle_sorter(self, show): if show: self.show_sorter_btn.setText(trans._(">> Hide Sorter")) self.plugin_sorter.show() else: self.show_sorter_btn.setText(trans._("<< Show Sorter")) self.plugin_sorter.hide() def _toggle_status(self, show): if show: self.show_status_btn.setText(trans._("Hide Status")) self.stdout_text.show() else: self.show_status_btn.setText(trans._("Show Status")) self.stdout_text.hide() def _install_packages(self, packages: Sequence[str] = ()): if not packages: _packages = self.direct_entry_edit.text() if os.path.exists(_packages): packages = [_packages] else: packages = _packages.split() self.direct_entry_edit.clear() if packages: self.installer.install(packages) if __name__ == "__main__": from qtpy.QtWidgets import QApplication app = QApplication([]) w = QtPluginDialog() w.show() app.exec_()
37.617375
79
0.640214
2,146
20,351
5.852283
0.184529
0.01911
0.016641
0.013536
0.243411
0.157656
0.139502
0.092284
0.092284
0.077076
0
0.006986
0.26151
20,351
540
80
37.687037
0.828665
0.030121
0
0.15914
0
0
0.028813
0.001268
0
0
0
0.001852
0
1
0.053763
false
0
0.047312
0
0.11828
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a112375ff4d16de8957c825f7c7971fdb15e0cc
1,179
py
Python
hata/discord/webhook/utils.py
WizzyBots/hata
f6991afc0bebf7dad932888a536f4d010f8663c7
[ "0BSD" ]
1
2022-03-02T03:59:57.000Z
2022-03-02T03:59:57.000Z
hata/discord/webhook/utils.py
m0nk3ybraindead/hata
f87ed3d7009eeae31d6ea158772efd33775c7b1c
[ "0BSD" ]
1
2022-02-08T16:54:39.000Z
2022-02-08T16:54:39.000Z
hata/discord/webhook/utils.py
WizzyBots/hata
f6991afc0bebf7dad932888a536f4d010f8663c7
[ "0BSD" ]
null
null
null
__all__ = ('create_partial_webhook_from_id', ) from scarletio import export from ..core import USERS from .preinstanced import WebhookType from .webhook import Webhook @export def create_partial_webhook_from_id(webhook_id, token, *, type_=WebhookType.bot, channel_id=0): """ Creates a partial webhook from the given parameters. If the webhook with the given `webhook_id` already exists, then returns that instead. Parameters ---------- webhook_id : `int` The identifier number of the webhook. token : `str` The token of the webhook. type_ : ``WebhookType`` = `WebhookType.bot`, Optional (Keyword only) The webhook's type. Defaults to `WebhookType.bot`. channel_id : `int` = `0`, Optional (Keyword only) The webhook's channel's identifier. Defaults to `0`. Returns ------- webhook : ``Webhook`` """ try: webhook = USERS[webhook_id] except KeyError: webhook = Webhook._create_empty(webhook_id) webhook.channel_id = channel_id webhook.type = type_ USERS[webhook_id] = webhook webhook.token = token return webhook
27.418605
115
0.653096
141
1,179
5.269504
0.340426
0.072678
0.072678
0.064603
0.15074
0.080754
0
0
0
0
0
0.003394
0.250212
1,179
42
116
28.071429
0.837104
0.454623
0
0
0
0
0.053286
0.053286
0
0
0
0
0
1
0.0625
false
0
0.25
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a1292fe9e365e4f3b12243aeeeb62b3fcd34222
1,067
py
Python
MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py
henriqueumeda/-Python-study
28e93a377afa4732037a29eb74d4bc7c9e24b62f
[ "MIT" ]
null
null
null
MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py
henriqueumeda/-Python-study
28e93a377afa4732037a29eb74d4bc7c9e24b62f
[ "MIT" ]
null
null
null
MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py
henriqueumeda/-Python-study
28e93a377afa4732037a29eb74d4bc7c9e24b62f
[ "MIT" ]
null
null
null
SCRABBLE_LETTER_VALUES = { 'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10 } def getWordScore(word, n): """ Returns the score for a word. Assumes the word is a valid word. The score for a word is the sum of the points for letters in the word, multiplied by the length of the word, PLUS 50 points if all n letters are used on the first turn. Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES) word: string (lowercase letters) n: integer (HAND_SIZE; i.e., hand size required for additional points) returns: int >= 0 """ total_points = 0 for letter in word: total_points += SCRABBLE_LETTER_VALUES[letter] total_points *= len(word) if len(word) == n: total_points += 50 return total_points print(getWordScore('waybill', 7))
35.566667
115
0.585754
191
1,067
3.209424
0.439791
0.057096
0.097879
0.039152
0.052202
0
0
0
0
0
0
0.050569
0.258669
1,067
29
116
36.793103
0.724399
0.469541
0
0
0
0
0.063707
0
0
0
0
0
0
1
0.076923
false
0
0
0
0.153846
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a14e512e0f7f79c5bcbfd4af00b8cc29f035958
6,376
py
Python
qscatv2/make_seasonal_images.py
tmilliman/sir_to_netcdf
d4641cdc5a9e92a55c0edb2dc6cd8c0e2da6f1fa
[ "MIT" ]
null
null
null
qscatv2/make_seasonal_images.py
tmilliman/sir_to_netcdf
d4641cdc5a9e92a55c0edb2dc6cd8c0e2da6f1fa
[ "MIT" ]
null
null
null
qscatv2/make_seasonal_images.py
tmilliman/sir_to_netcdf
d4641cdc5a9e92a55c0edb2dc6cd8c0e2da6f1fa
[ "MIT" ]
null
null
null
#!/usr/bin/env python # script to make seasonal means and stddev images of 4-day sig0 # values. import os import sys import glob import numpy as np import sirpy2 as sp2 import argparse from osgeo import gdal DATADIR = "./" NODATA_VALUE = -9999.0 Q2M = { "JAS": list(range(7, 10)), "OND": list(range(10, 13)), "JFM": list(range(1, 4)), "AMJ": list(range(4, 7)), } # this allows GDAL to throw Python Exceptions gdal.UseExceptions() def db2pr(dbvalue): pr = 10 ** (dbvalue / 10.0) return pr if __name__ == "__main__": # set up arguments parser = argparse.ArgumentParser( "script to make quarterly " + "means and stdevs of qscat dB values" ) parser.add_argument( "-v", "--verbose", help="increase output verbosity", action="store_true", default=False, ) parser.add_argument( "-q", "--quarter", nargs="?", choices=("JAS", "OND", "JFM", "AMJ"), default="JAS", const="JAS", help="Quarter for aggregation. Default=JAS", ) parser.add_argument("region", help="BYU region string (e.g. SAm, NAm, Ama, etc.)") parser.add_argument( "year", type=int, help="Year e.g. 1999 (qscat data start in 1999)" ) args = parser.parse_args() verbose = args.verbose year = args.year quarter = args.quarter # region list (LAEA regions only) valid_region_list = [ "Grn", "Ala", "CAm", "NAm", "SAm", "NAf", "SAf", "Sib", "Eur", "SAs", "ChJ", "Ind", "Aus", "Ber", ] region = args.region try: region_index = valid_region_list.index(region) except Exception: sys.stderr.write("Region not valid.\n") sys.stderr.write("Valid regions are:\n") sys.stderr.write("{}\n".format(valid_region_list)) sys.exit(1) if verbose: print("region: {}".format(region)) print("year: {}".format(year)) print("quarter: {}".format(quarter)) # set data dir indir = os.path.join(DATADIR, "geotiffs", region, str(year)) outdir = indir if year == 1999: year2 = 99 else: year2 = "{:02d}".format(year - 2000) monthlist = Q2M[quarter] # make a list of files for this year filepatt = "quev-a-{}{}-*.tif".format(region, year2) globpatt = os.path.join(indir, filepatt) if verbose: print("glob pattern: {}".format(globpatt)) filelist = glob.glob(globpatt) qlist = [] for filepath in filelist: fn = os.path.basename(filepath) if verbose: print(fn) fn_dt = sp2.fn2dt(fn, date_flag="center") iyear = fn_dt.year imonth = fn_dt.month iday = fn_dt.day if imonth in monthlist: qlist.append(fn) if verbose: print("{}: {}-{}-{}".format(fn, iyear, imonth, iday)) print("{}-{}: {}".format(year, quarter, qlist)) if len(qlist) == 0: warnmsg = "No images found for this quarter.\n" sys.stdout.write(warnmsg) sys.exit(0) # loop over images for this quarter db_quarter = [] for i, image in enumerate(qlist): a_imgpath = os.path.join(indir, image) try: a_ds = gdal.Open(a_imgpath) except Exception: print("Unable to open {}".format(a_imgpath)) sys.exit(1) try: srcband = a_ds.GetRasterBand(1) except Exception: print("Band ({}) not found".format(1)) sys.exit(1) a_data = srcband.ReadAsArray() a_mask = a_data == NODATA_VALUE # if this is the first image get projection and geotransform if i == 0: prj = a_ds.GetProjection() gt = a_ds.GetGeoTransform() ny, nx = a_data.shape db_data = a_data db_masked = np.ma.MaskedArray(db_data, a_mask) # add image to db_quarter list db_quarter.append(db_masked) # close datasets a_ds = None # stack list into array and find mean and std dbarray = np.ma.stack(db_quarter, axis=2) dbmean = np.ma.mean(dbarray, axis=2) dbstd = np.ma.std(dbarray, axis=2) print(dbmean.shape) # finally, save as a geotiff output_format = "GTiff" driver = gdal.GetDriverByName(output_format) dst_filename = "{}-quev-mean-db-{}-{}.tif" dst_filename = dst_filename.format(region, year, quarter) dst_dir = os.path.join(DATADIR, "geotiffs", region, str(year)) dst_path = os.path.join(dst_dir, dst_filename) if verbose: print("Output file for sig0 means: {}".format(dst_path)) dst_ds = driver.Create(dst_path, nx, ny, 1, gdal.GDT_Float32) dst_data = np.ma.filled(dbmean, fill_value=NODATA_VALUE) dst_ds.GetRasterBand(1).WriteArray(dst_data) dst_ds.GetRasterBand(1).SetNoDataValue(NODATA_VALUE) print("gt: {}".format(gt)) dst_ds.SetGeoTransform(gt) dst_ds.SetProjection(prj) dst_ds = None dbmean_min = dbmean.min() dbmean_max = dbmean.max() dbmean_median = np.ma.median(dbmean) print("Quarterly ({}) Mean Stats".format(quarter)) print(" Min: {}".format(dbmean_min)) print(" Max: {}".format(dbmean_max)) print(" Median: {}".format(dbmean_median)) # repeat for standard deviation output_format = "GTiff" driver = gdal.GetDriverByName(output_format) dst_filename = "{}-quev-std-db-{}-{}.tif".format(region, year, quarter) dst_dir = os.path.join(DATADIR, "geotiffs", region, str(year)) dst_path = os.path.join(dst_dir, dst_filename) if verbose: print("Output file: {}".format(dst_path)) dst_ds = driver.Create(dst_path, nx, ny, 1, gdal.GDT_Float32) dst_data = np.ma.filled(dbstd, fill_value=NODATA_VALUE) dst_ds.GetRasterBand(1).WriteArray(dst_data) dst_ds.GetRasterBand(1).SetNoDataValue(NODATA_VALUE) print("gt: {}".format(gt)) dst_ds.SetGeoTransform(gt) dst_ds.SetProjection(prj) dst_ds = None dbstd_min = dbstd.min() dbstd_max = dbstd.max() dbstd_median = np.ma.median(dbstd) print("Quarterly ({}) Stdev Stats".format(quarter)) print(" Min: {}".format(dbstd_min)) print(" Max: {}".format(dbstd_max)) print(" Median: {}".format(dbstd_median))
27.601732
86
0.592848
821
6,376
4.47503
0.297199
0.016331
0.019053
0.020686
0.264017
0.264017
0.246598
0.246598
0.236255
0.236255
0
0.016043
0.266782
6,376
230
87
27.721739
0.76984
0.073557
0
0.225989
0
0
0.13425
0.008316
0
0
0
0
0
1
0.00565
false
0
0.039548
0
0.050847
0.124294
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a15ab57e7398ab067062419a83d15fd9bf34d36
434
py
Python
ex062.py
noahbarros/Python-Exercises
fafda898473bc984280e201ed11d8ad76cc8624a
[ "MIT" ]
1
2021-07-13T21:41:00.000Z
2021-07-13T21:41:00.000Z
ex062.py
noahbarros/Python-Exercises
fafda898473bc984280e201ed11d8ad76cc8624a
[ "MIT" ]
null
null
null
ex062.py
noahbarros/Python-Exercises
fafda898473bc984280e201ed11d8ad76cc8624a
[ "MIT" ]
null
null
null
primeiro = int(input('Digite o priemiro termo da PA: ')) razão = int(input('Digite a razão da PA: ')) termo = primeiro cont = 1 total = 0 mais = 10 while mais != 0: total += mais while cont <= total: print(f'{termo} ', end='') termo += razão cont += 1 print('Pausa') mais = int(input('Quantos termos você quer usar a mais? ')) print(f'a progressão foi finalizada com {total} termos mostrados')
27.125
66
0.612903
63
434
4.222222
0.507937
0.090226
0.105263
0
0
0
0
0
0
0
0
0.018405
0.248848
434
15
67
28.933333
0.797546
0
0
0
0
0
0.368664
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a16b528f332e28d501ffe602ae57113af02e27c
3,720
py
Python
arxml_data_extractor/handler/object_handler.py
Brokdar/ArxmlDataExtractor
2853112cbd4d001418b11ccb99f1db268347dfab
[ "MIT" ]
16
2020-08-16T09:13:35.000Z
2022-03-17T13:39:26.000Z
arxml_data_extractor/handler/object_handler.py
Brokdar/ArxmlDataExtractor
2853112cbd4d001418b11ccb99f1db268347dfab
[ "MIT" ]
null
null
null
arxml_data_extractor/handler/object_handler.py
Brokdar/ArxmlDataExtractor
2853112cbd4d001418b11ccb99f1db268347dfab
[ "MIT" ]
2
2020-10-14T10:54:37.000Z
2021-07-06T01:30:44.000Z
from lxml.etree import Element, QName from typing import Union, List, Any from tqdm import tqdm import logging from arxml_data_extractor.handler import value_handler from arxml_data_extractor.handler.path_handler import PathHandler from arxml_data_extractor.asr.asr_parser import AsrParser from arxml_data_extractor.query.data_query import DataQuery from arxml_data_extractor.query.data_object import DataObject from arxml_data_extractor.query.data_value import DataValue class ObjectHandler(): def __init__(self, parser: AsrParser): self.logger = logging.getLogger() self.path_handler = PathHandler(parser) def handle(self, data_object: DataObject, node: Element = None) -> Union[list, dict]: is_not_root = True if node is None: is_not_root = False node = self.path_handler.parser.root if is_not_root: self.logger.info(f'ObjectHandler - handle DataObject(\'{data_object.name}\')') else: self.logger.info(f'ObjectHandler - [root] handle DataObject(\'{data_object.name}\')') values = [] elements = self.path_handler.elements_by_path(data_object.path, node) for element in tqdm( elements, desc=f'Handle DataObject(\'{data_object.name}\')', disable=is_not_root, bar_format="{desc:<70}{percentage:3.0f}% |{bar:70}| {n_fmt:>4}/{total_fmt}"): if element is not None: self.logger.info( f'ObjectHandler - element found: \'{QName(element).localname}\' at line {element.sourceline - 1}' ) values.append(self.__handle_values(data_object.values, element)) if not values: self.logger.warning( f'ObjectHandler - no values found for DataObject(\'{data_object.name}\')') else: self.logger.info( f'ObjectHandler - values found for DataObject(\'{data_object.name}\'): {len(values)}' ) return values[0] if len(values) == 1 else values def __handle_values(self, values: List[Union[DataValue, DataObject]], node: Element) -> dict: results = {} for value in values: if isinstance(value, DataObject): results[value.name] = self.handle(value, node) elif isinstance(value, DataValue): results[value.name] = self.__handle_value(value.query, node) if results[value.name] is None: self.logger.info( f'ObjectHandler - no value found for DataValue(\'{value.name}\')') else: self.logger.info( f'ObjectHandler - value found: DataValue(\'{value.name}\') = \'{results[value.name]}\'' ) else: error = f'ObjectHandler - invalid value type ({type(value)}). Value must be of type DataObject or DataValue' self.logger.error(error) raise TypeError(error) return results def __handle_value(self, query: DataQuery, node: Element) -> Any: if isinstance(query.path, DataQuery.XPath): if query.path.is_reference: element = self.path_handler.element_by_inline_ref(query.path, node) else: element = self.path_handler.element_by_xpath(query.path.xpath, node) else: # DataQuery.Reference isn't allowed on DataValue return None if element is None: return None return value_handler.handle(query, element)
42.272727
125
0.595968
418
3,720
5.143541
0.232057
0.04186
0.036279
0.061395
0.294419
0.223721
0.093023
0.052093
0.052093
0.052093
0
0.003868
0.305108
3,720
87
126
42.758621
0.827853
0.012366
0
0.164384
0
0.013699
0.146444
0.013947
0
0
0
0
0
1
0.054795
false
0
0.136986
0
0.273973
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a19876a956cc7df8eee4ce39d6fc5531c4cfc7c
3,401
py
Python
src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py
Chromico/bk-base
be822d9bbee544a958bed4831348185a75604791
[ "MIT" ]
84
2021-06-30T06:20:23.000Z
2022-03-22T03:05:49.000Z
src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py
Chromico/bk-base
be822d9bbee544a958bed4831348185a75604791
[ "MIT" ]
7
2021-06-30T06:21:16.000Z
2022-03-29T07:36:13.000Z
src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py
Chromico/bk-base
be822d9bbee544a958bed4831348185a75604791
[ "MIT" ]
40
2021-06-30T06:21:26.000Z
2022-03-29T12:42:26.000Z
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from copy import deepcopy from datamanage.pro import exceptions as dm_pro_errors from datamanage.utils.api import MetaApi from datamanage.pro.utils.time import utc_to_local, str_to_datetime from datamanage.pro.lifecycle.models_dict import ( DATASET_CREATE_MAPPINGS, DATASET_CREATE_EVENT_INFO_DICT, DataTraceShowType, ComplexSearchBackendType, DataTraceFinishStatus, ) def get_dataset_create_info(dataset_id, dataset_type): """获取数据足迹中和数据创建相关信息 :param dataset_id: 数据id :param dataset_type: 数据类型 :return: 数据创建相关信息 :rtype: list """ # 1)从dgraph中获取数据创建相关信息 data_set_create_info_statement = """ { get_dataset_create_info(func: eq(%s, "%s")){created_by created_at} } """ % ( DATASET_CREATE_MAPPINGS[dataset_type]['data_set_pk'], dataset_id, ) query_result = MetaApi.complex_search( {"backend_type": ComplexSearchBackendType.DGRAPH.value, "statement": data_set_create_info_statement}, raw=True ) create_info_ret = query_result['data']['data']['get_dataset_create_info'] if not (isinstance(create_info_ret, list) and create_info_ret): raise dm_pro_errors.GetDataSetCreateInfoError(message_kv={'dataset_id': dataset_id}) # 2)得到格式化创建信息 create_trace_dict = deepcopy(DATASET_CREATE_EVENT_INFO_DICT) create_trace_dict.update( { "sub_type": dataset_type, "sub_type_alias": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'], "description": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'], "created_at": utc_to_local(create_info_ret[0]['created_at']), "created_by": create_info_ret[0]['created_by'], "show_type": DataTraceShowType.DISPLAY.value, "datetime": str_to_datetime(utc_to_local(create_info_ret[0]['created_at'])), "status": DataTraceFinishStatus.STATUS, "status_alias": DataTraceFinishStatus.STATUS_ALIAS, } ) return [create_trace_dict]
44.168831
118
0.728021
448
3,401
5.296875
0.426339
0.046355
0.03287
0.047198
0.139064
0.086389
0.086389
0.069954
0.069954
0
0
0.004278
0.175243
3,401
76
119
44.75
0.841711
0.438695
0
0
0
0
0.173518
0.050187
0
0
0
0
0
1
0.02439
false
0
0.121951
0
0.170732
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a1cd65b30b7bbba4f6241ea55e68759c3f56fc4
15,868
py
Python
splash/render_options.py
tashidexiaoL/splashnew
2bbb886bae8fa88c30a4460f41ca940c4b010287
[ "BSD-3-Clause" ]
3,612
2015-01-04T07:22:20.000Z
2022-03-31T07:12:19.000Z
splash/render_options.py
tashidexiaoL/splashnew
2bbb886bae8fa88c30a4460f41ca940c4b010287
[ "BSD-3-Clause" ]
983
2015-01-01T17:54:49.000Z
2022-03-29T05:05:53.000Z
splash/render_options.py
tashidexiaoL/splashnew
2bbb886bae8fa88c30a4460f41ca940c4b010287
[ "BSD-3-Clause" ]
570
2015-01-06T17:48:46.000Z
2022-03-31T12:35:32.000Z
# -*- coding: utf-8 -*- import os import json from splash import defaults from splash.utils import to_bytes, path_join_secure from splash.errors import BadOption class RenderOptions(object): """ Options that control how to render a response. """ _REQUIRED = object() def __init__(self, data, max_timeout): self.data = data self.max_timeout = max_timeout @classmethod def raise_error(cls, argument, description, type='bad_argument', **kwargs): params = { 'type': type, 'argument': argument, 'description': description } params.update(kwargs) raise BadOption(params) @classmethod def fromrequest(cls, request, max_timeout): """ Initialize options from a Twisted Request. """ # 1. GET / POST data data = {key.decode('utf-8'): values[0].decode('utf-8') for key, values in request.args.items()} if request.method == b'POST': content_type = request.getHeader(b'content-type') if content_type: request.content.seek(0) # 2. application/json POST data if b'application/json' in content_type: try: content = request.content.read().decode('utf-8') data.update(json.loads(content)) except ValueError as e: raise BadOption({ 'type': 'invalid_json', 'description': "Can't decode JSON", 'message': str(e), }) # 3. js_source from application/javascript POST requests if b'application/javascript' in content_type: data['js_source'] = request.content.read().decode('utf-8') request.content.seek(0) data['uid'] = id(request) return cls(data, max_timeout) def get_expired_args(self, cache): """ Return a list of argument names from load_args which can't be loaded """ return cache.get_missing(self.get_load_args().items()) def save_args_to_cache(self, cache): """ Process save_args and put all values to cache. Return a list of (name, key) pairs. """ save_args = self.get_save_args() save_values = [self.data.get(name) for name in save_args] keys = cache.add_many(save_values) return list(zip(save_args, keys)) def load_cached_args(self, cache): load_args = self.get_load_args() for name, key in (load_args or {}).items(): self.data[name] = cache[key] def get(self, name, default=_REQUIRED, type=str, range=None): value = self.data.get(name) if value is not None: if type is not None: try: value = type(value) except ValueError: msg = "Argument %r has a wrong type" % (name,) self.raise_error(name, msg, required_type=type.__name__) if range is not None and not (range[0] <= value <= range[1]): self.raise_error(name, 'Argument is out of the allowed range', min=range[0], max=range[1], value=value) return value elif default is self._REQUIRED: self.raise_error(name, 'Required argument is missing: %s' % name, type='argument_required') else: return default def _get_bool(self, name, default=_REQUIRED): return self.get(name, default, type=int, range=(0, 1)) def _get_url(self, name, default=_REQUIRED): url = self.get(name, default, type=None) if isinstance(url, bytes): url = url.decode('utf8') return url def get_uid(self): return self.get('uid') def get_url(self): return self._get_url("url") def get_baseurl(self): return self._get_url("baseurl", default=None) def get_wait(self): return self.get("wait", defaults.WAIT_TIME, type=float, range=(0, self.get_timeout())) def get_timeout(self): default = min(self.max_timeout, defaults.TIMEOUT) return self.get("timeout", default, type=float, range=(0, self.max_timeout)) def get_resource_timeout(self): return self.get("resource_timeout", defaults.RESOURCE_TIMEOUT, type=float, range=(0, 1e6)) def get_response_body(self): return self._get_bool("response_body", defaults.RESPONSE_BODY_ENABLED) def get_request_body(self): return self._get_bool("request_body", defaults.REQUEST_BODY_ENABLED) def get_images(self): return self._get_bool("images", defaults.AUTOLOAD_IMAGES) def get_proxy(self): return self.get("proxy", default=None) def get_js_source(self): return self.get("js_source", default=None) def get_width(self): return self.get("width", None, type=int, range=(1, defaults.MAX_WIDTH)) def get_height(self): return self.get("height", None, type=int, range=(1, defaults.MAX_HEIGTH)) def get_scale_method(self): scale_method = self.get("scale_method", defaults.IMAGE_SCALE_METHOD) allowed_scale_methods = ['raster', 'vector'] if scale_method not in allowed_scale_methods: self.raise_error( argument='scale_method', description="Invalid 'scale_method': %s" % scale_method, allowed=allowed_scale_methods, received=scale_method, ) return scale_method def get_quality(self): return self.get("quality", defaults.JPEG_QUALITY, type=int, range=(0, 100)) def get_http_method(self): method = self.get("http_method", "GET") if method.upper() not in ["POST", "GET"]: self.raise_error("http_method", "Unsupported HTTP method {}".format(method)) return method def get_body(self): body = self.get("body", None, to_bytes) method = self.get("http_method", "GET").upper() if method == 'GET' and body: self.raise_error("body", "GET request should not have a body") return body def get_render_all(self, wait=None): result = self._get_bool("render_all", False) if result == 1 and wait == 0: self.raise_error("render_all", "Pass non-zero 'wait' to render full webpage") return result def get_lua_source(self): return self.get("lua_source") def get_js_profile(self, js_profiles_path): js_profile = self.get("js", default=None) if not js_profile: return js_profile if js_profiles_path is None: self.raise_error('js', 'Javascript profiles are not enabled on server') try: profile_dir = path_join_secure(js_profiles_path, js_profile) except ValueError as e: # security check fails print(e) self.raise_error('js', 'Javascript profile does not exist') if not os.path.isdir(profile_dir): self.raise_error('js', 'Javascript profile does not exist') return profile_dir def get_headers(self): headers = self.get("headers", default=None, type=None) if headers is None: return headers if not isinstance(headers, (list, tuple, dict)): self.raise_error( argument='headers', description="'headers' must be either a JSON array of " "(name, value) pairs or a JSON object" ) if isinstance(headers, (list, tuple)): for el in headers: string_only = all(isinstance(e, str) for e in el) if not (isinstance(el, (list, tuple)) and len(el) == 2 and string_only): self.raise_error( argument='headers', description="'headers' must be either a JSON array of " "(name, value) pairs or a JSON object" ) return headers def get_save_args(self): save_args = self.get("save_args", default=None, type=None) if save_args is None: return [] if isinstance(save_args, str): # comma-separated string save_args = save_args.split(',') if not isinstance(save_args, list): self.raise_error( argument="save_args", description="'save_args' should be either a comma-separated " "string or a JSON array with argument names", ) # JSON array if not all(isinstance(a, str) for a in save_args): self.raise_error( argument="save_args", description="'save_args' should be a list of strings", ) return save_args def get_load_args(self): load_args = self.get("load_args", default=None, type=None) if load_args is None: return {} if isinstance(load_args, str): try: load_args = dict( kv.split("=", 1) for kv in load_args.split(';') ) except ValueError: self.raise_error( argument="load_args", description="'load_args' string value is not a " "semicolon-separated list of name=hash pairs" ) if not isinstance(load_args, dict): self.raise_error( argument="load_args", description="'load_args' should be either a JSON object with " "argument hashes or a semicolon-separated list " "of name=hash pairs" ) return load_args def get_viewport(self, wait=None): viewport = self.get("viewport", defaults.VIEWPORT_SIZE) if viewport == 'full': if wait == 0: self.raise_error("viewport", "Pass non-zero 'wait' to render full webpage") else: try: validate_size_str(viewport) except ValueError as e: self.raise_error("viewport", str(e)) return viewport def get_filters(self, pool=None, adblock_rules=None): filter_names = self.get('filters', '') filter_names = [f for f in filter_names.split(',') if f] if pool is None and adblock_rules is None: # skip validation return filter_names if not filter_names: return filter_names if pool is not None: adblock_rules = pool.network_manager_factory.adblock_rules if adblock_rules is None: self.raise_error( "filters", "Invalid filter names: %s" % (filter_names,) ) if adblock_rules is not None: unknown_filters = adblock_rules.get_unknown_filters(filter_names) if unknown_filters: self.raise_error( "filters", "Invalid filter names: %s" % (unknown_filters,) ) return filter_names def get_allowed_domains(self): allowed_domains = self.get("allowed_domains", default=None) if allowed_domains is not None: return allowed_domains.split(',') def get_allowed_content_types(self): content_types = self.get("allowed_content_types", default=['*']) if isinstance(content_types, str): content_types = list(filter(None, content_types.split(','))) return content_types def get_forbidden_content_types(self): content_types = self.get("forbidden_content_types", default=[]) if isinstance(content_types, str): content_types = list(filter(None, content_types.split(','))) return content_types def get_html5_media(self): return self._get_bool("html5_media", defaults.HTML5_MEDIA_ENABLED) def get_engine(self, browser_engines_enabled=None): engine = self.get("engine", default="webkit", type=str) if engine not in {"webkit", "chromium"}: self.raise_error("engine", "Unknown render engine {}".format(engine)) if browser_engines_enabled is not None: if engine not in browser_engines_enabled: self.raise_error("engine", "Disabled render engine {}".format(engine)) return engine def get_http2(self): engine = self.get_engine() if self.get_engine() == "webkit": default = defaults.WEBKIT_HTTP2_ENABLED else: assert engine == 'chromium' default = defaults.CHROMIUM_HTTP2_ENABLED return self._get_bool("http2", default) def get_common_params(self, js_profiles_path): wait = self.get_wait() return { 'url': self.get_url(), 'baseurl': self.get_baseurl(), 'wait': wait, 'resource_timeout': self.get_resource_timeout(), 'viewport': self.get_viewport(wait), 'render_all': self.get_render_all(wait), 'images': self.get_images(), 'headers': self.get_headers(), 'proxy': self.get_proxy(), 'js_profile': self.get_js_profile(js_profiles_path), 'js_source': self.get_js_source(), 'http_method': self.get_http_method(), 'body': self.get_body(), 'html5_media': self.get_html5_media(), 'http2': self.get_http2(), # 'lua': self.get_lua(), } def get_image_params(self): return { 'width': self.get_width(), 'height': self.get_height(), 'scale_method': self.get_scale_method() } def get_png_params(self): return self.get_image_params() def get_jpeg_params(self): params = {'quality': self.get_quality()} params.update(self.get_image_params()) return params def get_include_params(self): return dict( html=self._get_bool("html", defaults.DO_HTML), iframes=self._get_bool("iframes", defaults.DO_IFRAMES), png=self._get_bool("png", defaults.DO_PNG), jpeg=self._get_bool("jpeg", defaults.DO_JPEG), script=self._get_bool("script", defaults.SHOW_SCRIPT), console=self._get_bool("console", defaults.SHOW_CONSOLE), history=self._get_bool("history", defaults.SHOW_HISTORY), har=self._get_bool("har", defaults.SHOW_HAR), ) def validate_size_str(size_str): """ Validate size string in WxH format. Can be used to validate both viewport and window size strings. Does not special-case ``'full'`` viewport. Raises ``ValueError`` if anything goes wrong. :param size_str: string to validate """ max_width = defaults.VIEWPORT_MAX_WIDTH max_heigth = defaults.VIEWPORT_MAX_HEIGTH max_area = defaults.VIEWPORT_MAX_AREA try: w, h = map(int, size_str.split('x')) except ValueError: raise ValueError("Invalid viewport format: %s" % size_str) else: if not ((0 < w <= max_width) and (0 < h <= max_heigth) and (w * h < max_area)): raise ValueError("Viewport (%dx%d, area=%d) is out of range (%dx%d, area=%d)" % (w, h, w * h, max_width, max_heigth, max_area))
35.578475
91
0.568188
1,870
15,868
4.616043
0.137968
0.056766
0.035681
0.031511
0.225788
0.171455
0.124421
0.109824
0.083874
0.062095
0
0.004408
0.328019
15,868
445
92
35.658427
0.805121
0.043484
0
0.171598
0
0.002959
0.126687
0.004387
0
0
0
0
0.002959
1
0.136095
false
0.005917
0.014793
0.056213
0.301775
0.002959
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a1d8b11d101fed7641300b2c4ef25ddc8a61c8b
362
py
Python
syntax/func.py
sangumee/Opentutorials-Webn-Python
9f813f8f342ea99ffee6e31f363f175fa023c489
[ "MIT" ]
null
null
null
syntax/func.py
sangumee/Opentutorials-Webn-Python
9f813f8f342ea99ffee6e31f363f175fa023c489
[ "MIT" ]
null
null
null
syntax/func.py
sangumee/Opentutorials-Webn-Python
9f813f8f342ea99ffee6e31f363f175fa023c489
[ "MIT" ]
null
null
null
# code.... a = 1 b = 2 c = 3 s = a+b+c r = s/3 print(r) # code.... ''' def average(): a=1 b=2 c=3 s=a+b+c r=s/3 print(r) average() ''' ''' #input #parameter #argument def average(a,b,c): s=a+b+c r=s/3 print(r) average(10,20,30) ''' def average(a, b, c): s = a+b+c r = s/3 return r print(average(10, 20, 30))
9.05
26
0.466851
75
362
2.253333
0.253333
0.071006
0.106509
0.094675
0.579882
0.579882
0.579882
0.579882
0.579882
0.579882
0
0.087649
0.30663
362
39
27
9.282051
0.585657
0.046961
0
0.363636
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0
0
0.181818
0.181818
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a1dc389d59f49c155580d9fe0bb5e5e94a7281e
1,718
py
Python
tools/evolution/codingSnps_filter.py
ramezrawas/galaxy-1
c03748dd49c060a68d07bce56eae33e0ba154414
[ "CC-BY-3.0" ]
1
2019-11-03T11:45:43.000Z
2019-11-03T11:45:43.000Z
tools/evolution/codingSnps_filter.py
ramezrawas/galaxy-1
c03748dd49c060a68d07bce56eae33e0ba154414
[ "CC-BY-3.0" ]
7
2016-12-07T22:19:37.000Z
2019-01-30T15:04:26.000Z
tools/evolution/codingSnps_filter.py
ramezrawas/galaxy-1
c03748dd49c060a68d07bce56eae33e0ba154414
[ "CC-BY-3.0" ]
null
null
null
#!/usr/bin/env python # runs after the job (and after the default post-filter) from galaxy.tools.parameters import DataToolParameter # Older py compatibility try: set() except: from sets import Set as set def validate_input( trans, error_map, param_values, page_param_map ): dbkeys = set() data_param_names = set() data_params = 0 for name, param in page_param_map.items(): if isinstance( param, DataToolParameter ): # for each dataset parameter if param_values.get(name, None) is not None: dbkeys.add( param_values[name].dbkey ) data_params += 1 # check meta data try: param = param_values[name] int( param.metadata.startCol ) int( param.metadata.endCol ) int( param.metadata.chromCol ) if param.metadata.strandCol is not None: int( param.metadata.strandCol ) except: error_msg = ("The attributes of this dataset are not properly set. " "Click the pencil icon in the history item to set the chrom, start, end and strand columns.") error_map[name] = error_msg data_param_names.add( name ) if len( dbkeys ) > 1: for name in data_param_names: error_map[name] = "All datasets must belong to same genomic build, " \ "this dataset is linked to build '%s'" % param_values[name].dbkey if data_params != len(data_param_names): for name in data_param_names: error_map[name] = "A dataset of the appropriate type is required"
40.904762
117
0.586147
212
1,718
4.613208
0.448113
0.056237
0.071575
0.0409
0.071575
0.071575
0.071575
0.071575
0.071575
0
0
0.002648
0.340512
1,718
41
118
41.902439
0.860547
0.082072
0
0.181818
0
0
0.173028
0
0
0
0
0
0
1
0.030303
false
0
0.060606
0
0.090909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a1e3dc4bc93e35762cbfc644a38e3db21861cda
5,290
py
Python
qa/rpc-tests/listtransactions.py
DeftNerd/bitcoinclassic
afff0155e0dd528145818c43f259743f54966d95
[ "MIT" ]
8
2016-03-31T18:47:31.000Z
2021-09-30T05:42:32.000Z
qa/rpc-tests/listtransactions.py
DeftNerd/bitcoinclassic
afff0155e0dd528145818c43f259743f54966d95
[ "MIT" ]
1
2017-10-06T08:55:30.000Z
2017-10-06T08:55:30.000Z
qa/rpc-tests/listtransactions.py
DeftNerd/bitcoinclassic
afff0155e0dd528145818c43f259743f54966d95
[ "MIT" ]
2
2020-02-03T03:38:10.000Z
2021-09-30T05:42:36.000Z
#!/usr/bin/env python2 # Copyright (c) 2014-2015 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Exercise the listtransactions API from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * def check_array_result(object_array, to_match, expected): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. """ num_matched = 0 for item in object_array: all_match = True for key,value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue for key,value in expected.items(): if item[key] != value: raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value))) num_matched = num_matched+1 if num_matched == 0: raise AssertionError("No objects matched %s"%(str(to_match))) class ListTransactionsTest(BitcoinTestFramework): def run_test(self): # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() check_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0}) check_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0}) # mine a block, confirmations should change: self.nodes[0].generate(1) self.sync_all() check_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1}) check_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1}) # send-to-self: txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) check_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"send"}, {"amount":Decimal("-0.2")}) check_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"receive"}, {"amount":Decimal("0.2")}) # sendmany from node1: twice to self, twice to node2: send_to = { self.nodes[0].getnewaddress() : 0.11, self.nodes[1].getnewaddress() : 0.22, self.nodes[0].getaccountaddress("from1") : 0.33, self.nodes[1].getaccountaddress("toself") : 0.44 } txid = self.nodes[1].sendmany("", send_to) self.sync_all() check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.11")}, {"txid":txid} ) check_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.11")}, {"txid":txid} ) check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.22")}, {"txid":txid} ) check_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.22")}, {"txid":txid} ) check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.33")}, {"txid":txid} ) check_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.33")}, {"txid":txid, "account" : "from1"} ) check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.44")}, {"txid":txid, "account" : ""} ) check_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.44")}, {"txid":txid, "account" : "toself"} ) multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()]) self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True) txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) self.nodes[1].generate(1) self.sync_all() assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0) check_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True), {"category":"receive","amount":Decimal("0.1")}, {"txid":txid, "account" : "watchonly"} ) if __name__ == '__main__': ListTransactionsTest().main()
48.53211
105
0.542722
551
5,290
5.098004
0.221416
0.099324
0.091136
0.1068
0.562478
0.489142
0.484514
0.41189
0.407974
0.407974
0
0.032606
0.298488
5,290
108
106
48.981481
0.724333
0.095841
0
0.349398
0
0
0.136211
0
0
0
0
0
0.036145
1
0.024096
false
0
0.036145
0
0.072289
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a1ef1c625b2d34cef5abbf769654ee6310e0334
25,352
py
Python
salt/modules/mount.py
aletourneau/salt
d7013a2f64eb4b79592220d76274bc5dde609e08
[ "Apache-2.0" ]
null
null
null
salt/modules/mount.py
aletourneau/salt
d7013a2f64eb4b79592220d76274bc5dde609e08
[ "Apache-2.0" ]
null
null
null
salt/modules/mount.py
aletourneau/salt
d7013a2f64eb4b79592220d76274bc5dde609e08
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- ''' Salt module to manage unix mounts and the fstab file ''' from __future__ import absolute_import # Import python libs import os import re import logging # Import salt libs import salt.utils from salt._compat import string_types from salt.utils import which as _which from salt.exceptions import CommandNotFoundError, CommandExecutionError # Set up logger log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mount' def __virtual__(): ''' Only load on POSIX-like systems ''' # Disable on Windows, a specific file module exists: if salt.utils.is_windows(): return False return True def _list_mounts(): ret = {} if __grains__['os'] in ['MacOS', 'Darwin']: mounts = __salt__['cmd.run_stdout']('mount') else: mounts = __salt__['cmd.run_stdout']('mount -l') for line in mounts.split('\n'): comps = re.sub(r"\s+", " ", line).split() ret[comps[2]] = comps[0] return ret def _active_mountinfo(ret): _list = _list_mounts() filename = '/proc/self/mountinfo' if not os.access(filename, os.R_OK): msg = 'File not readable {0}' raise CommandExecutionError(msg.format(filename)) blkid_info = __salt__['disk.blkid']() with salt.utils.fopen(filename) as ifile: for line in ifile: comps = line.split() device = comps[2].split(':') device_name = comps[8] device_uuid = None if device_name: device_uuid = blkid_info.get(device_name, {}).get('UUID') device_uuid = device_uuid and device_uuid.lower() ret[comps[4]] = {'mountid': comps[0], 'parentid': comps[1], 'major': device[0], 'minor': device[1], 'root': comps[3], 'opts': comps[5].split(','), 'fstype': comps[7], 'device': device_name, 'alt_device': _list.get(comps[4], None), 'superopts': comps[9].split(','), 'device_uuid': device_uuid} return ret def _active_mounts(ret): ''' List active mounts on Linux systems ''' _list = _list_mounts() filename = '/proc/self/mounts' if not os.access(filename, os.R_OK): msg = 'File not readable {0}' raise CommandExecutionError(msg.format(filename)) with salt.utils.fopen(filename) as ifile: for line in ifile: comps = line.split() ret[comps[1]] = {'device': comps[0], 'alt_device': _list.get(comps[1], None), 'fstype': comps[2], 'opts': comps[3].split(',')} return ret def _active_mounts_freebsd(ret): ''' List active mounts on FreeBSD systems ''' for line in __salt__['cmd.run_stdout']('mount -p').split('\n'): comps = re.sub(r"\s+", " ", line).split() ret[comps[1]] = {'device': comps[0], 'fstype': comps[2], 'opts': comps[3].split(',')} return ret def _active_mounts_solaris(ret): ''' List active mounts on Solaris systems ''' for line in __salt__['cmd.run_stdout']('mount -v').split('\n'): comps = re.sub(r"\s+", " ", line).split() ret[comps[2]] = {'device': comps[0], 'fstype': comps[4], 'opts': comps[5].split('/')} return ret def _active_mounts_openbsd(ret): ''' List active mounts on OpenBSD systems ''' for line in __salt__['cmd.run_stdout']('mount -v').split('\n'): comps = re.sub(r"\s+", " ", line).split() nod = __salt__['cmd.run_stdout']('ls -l {0}'.format(comps[0])) nod = ' '.join(nod.split()).split(" ") parens = re.findall(r'\((.*?)\)', line, re.DOTALL) ret[comps[3]] = {'device': comps[0], 'fstype': comps[5], 'opts': parens[1].split(", "), 'major': str(nod[4].strip(",")), 'minor': str(nod[5]), 'device_uuid': parens[0]} return ret def _active_mounts_darwin(ret): ''' List active mounts on Mac OS systems ''' for line in __salt__['cmd.run_stdout']('mount').split('\n'): comps = re.sub(r"\s+", " ", line).split() parens = re.findall(r'\((.*?)\)', line, re.DOTALL)[0].split(", ") ret[comps[2]] = {'device': comps[0], 'fstype': parens[0], 'opts': parens[1:]} return ret def active(extended=False): ''' List the active mounts. CLI Example: .. code-block:: bash salt '*' mount.active ''' ret = {} if __grains__['os'] == 'FreeBSD': _active_mounts_freebsd(ret) elif __grains__['os'] == 'Solaris': _active_mounts_solaris(ret) elif __grains__['os'] == 'OpenBSD': _active_mounts_openbsd(ret) elif __grains__['os'] in ['MacOS', 'Darwin']: _active_mounts_darwin(ret) else: if extended: try: _active_mountinfo(ret) except CommandExecutionError: _active_mounts(ret) else: _active_mounts(ret) return ret def fstab(config='/etc/fstab'): ''' List the contents of the fstab CLI Example: .. code-block:: bash salt '*' mount.fstab ''' ret = {} if not os.path.isfile(config): return ret with salt.utils.fopen(config) as ifile: for line in ifile: if line.startswith('#'): # Commented continue if not line.strip(): # Blank line continue comps = line.split() if len(comps) != 6: # Invalid entry continue ret[comps[1]] = {'device': comps[0], 'fstype': comps[2], 'opts': comps[3].split(','), 'dump': comps[4], 'pass': comps[5]} return ret def rm_fstab(name, device, config='/etc/fstab'): ''' Remove the mount point from the fstab CLI Example: .. code-block:: bash salt '*' mount.rm_fstab /mnt/foo ''' contents = fstab(config) if name not in contents: return True # The entry is present, get rid of it lines = [] try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 6: # Invalid entry lines.append(line) continue comps = line.split() if device: if comps[1] == name and comps[0] == device: continue else: if comps[1] == name: continue lines.append(line) except (IOError, OSError) as exc: msg = "Couldn't read from {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) try: with salt.utils.fopen(config, 'w+') as ofile: ofile.writelines(lines) except (IOError, OSError) as exc: msg = "Couldn't write to {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) return True def set_fstab( name, device, fstype, opts='defaults', dump=0, pass_num=0, config='/etc/fstab', test=False, **kwargs): ''' Verify that this mount is represented in the fstab, change the mount to match the data passed, or add the mount if it is not present. CLI Example: .. code-block:: bash salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4 ''' # Fix the opts type if it is a list if isinstance(opts, list): opts = ','.join(opts) lines = [] change = False present = False if not os.path.isfile(config): raise CommandExecutionError('Bad config file "{0}"'.format(config)) try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 6: # Invalid entry lines.append(line) continue if comps[1] == name or comps[0] == device: # check to see if there are changes # and fix them if there are any present = True if comps[0] != device: change = True comps[0] = device if comps[1] != name: change = True comps[1] = name if comps[2] != fstype: change = True comps[2] = fstype if comps[3] != opts: change = True comps[3] = opts if comps[4] != str(dump): change = True comps[4] = str(dump) if comps[5] != str(pass_num): change = True comps[5] = str(pass_num) if change: log.debug( 'fstab entry for mount point {0} needs to be ' 'updated'.format(name) ) newline = ( '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format( device, name, fstype, opts, dump, pass_num ) ) lines.append(newline) else: lines.append(line) except (IOError, OSError) as exc: msg = 'Couldn\'t read from {0}: {1}' raise CommandExecutionError(msg.format(config, str(exc))) if change: if not salt.utils.test_mode(test=test, **kwargs): try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): msg = 'File not writable {0}' raise CommandExecutionError(msg.format(config)) return 'change' if not change: if present: # The right entry is already here return 'present' else: if not salt.utils.test_mode(test=test, **kwargs): # The entry is new, add it to the end of the fstab newline = '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(device, name, fstype, opts, dump, pass_num) lines.append(newline) try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): raise CommandExecutionError( 'File not writable {0}'.format( config ) ) return 'new' def rm_automaster(name, device, config='/etc/auto_salt'): ''' Remove the mount point from the auto_master CLI Example: .. code-block:: bash salt '*' mount.rm_automaster /mnt/foo ''' contents = automaster(config) if name not in contents: return True # The entry is present, get rid of it lines = [] try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 3: # Invalid entry lines.append(line) continue comps = line.split() prefix = "/.." name_chk = comps[0].replace(prefix, "") device_fmt = comps[2].split(":") if device: if name_chk == name and device_fmt[1] == device: continue else: if name_chk == name: continue lines.append(line) except (IOError, OSError) as exc: msg = "Couldn't read from {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) try: with salt.utils.fopen(config, 'w+') as ofile: ofile.writelines(lines) except (IOError, OSError) as exc: msg = "Couldn't write to {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) # Update automount __salt__['cmd.run']('automount -cv') return True def set_automaster( name, device, fstype, opts='', config='/etc/auto_salt', test=False, **kwargs): ''' Verify that this mount is represented in the auto_salt, change the mount to match the data passed, or add the mount if it is not present. CLI Example: .. code-block:: bash salt '*' mount.set_automaster /mnt/foo /dev/sdz1 ext4 ''' # Fix the opts type if it is a list if isinstance(opts, list): opts = ','.join(opts) lines = [] change = False present = False automaster_file = "/etc/auto_master" if not os.path.isfile(config): __salt__['file.touch'](config) __salt__['file.append'](automaster_file, "/-\t\t\t{0}".format(config)) name = "/..{0}".format(name) device_fmt = "{0}:{1}".format(fstype, device) type_opts = "-fstype={0},{1}".format(fstype, opts) if fstype == 'smbfs': device_fmt = device_fmt.replace(fstype, "") try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 3: # Invalid entry lines.append(line) continue if comps[0] == name or comps[2] == device_fmt: # check to see if there are changes # and fix them if there are any present = True if comps[0] != name: change = True comps[0] = name if comps[1] != type_opts: change = True comps[1] = type_opts if comps[2] != device_fmt: change = True comps[2] = device_fmt if change: log.debug( 'auto_master entry for mount point {0} needs to be ' 'updated'.format(name) ) newline = ( '{0}\t{1}\t{2}\n'.format( name, type_opts, device_fmt) ) lines.append(newline) else: lines.append(line) except (IOError, OSError) as exc: msg = 'Couldn\'t read from {0}: {1}' raise CommandExecutionError(msg.format(config, str(exc))) if change: if not salt.utils.test_mode(test=test, **kwargs): try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): msg = 'File not writable {0}' raise CommandExecutionError(msg.format(config)) return 'change' if not change: if present: # The right entry is already here return 'present' else: if not salt.utils.test_mode(test=test, **kwargs): # The entry is new, add it to the end of the fstab newline = ( '{0}\t{1}\t{2}\n'.format( name, type_opts, device_fmt) ) lines.append(newline) try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): raise CommandExecutionError( 'File not writable {0}'.format( config ) ) return 'new' def automaster(config='/etc/auto_salt'): ''' List the contents of the fstab CLI Example: .. code-block:: bash salt '*' mount.fstab ''' ret = {} if not os.path.isfile(config): return ret with salt.utils.fopen(config) as ifile: for line in ifile: if line.startswith('#'): # Commented continue if not line.strip(): # Blank line continue comps = line.split() if len(comps) != 3: # Invalid entry continue prefix = "/.." name = comps[0].replace(prefix, "") device_fmt = comps[2].split(":") opts = comps[1].split(',') ret[name] = {'device': device_fmt[1], 'fstype': opts[0], 'opts': opts[1:]} return ret def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None): ''' Mount a device CLI Example: .. code-block:: bash salt '*' mount.mount /mnt/foo /dev/sdz1 True ''' # Darwin doesn't expect defaults when mounting without other options if 'defaults' in opts and __grains__['os'] in ['MacOS', 'Darwin']: opts = None if isinstance(opts, string_types): opts = opts.split(',') if not os.path.exists(name) and mkmnt: __salt__['file.mkdir'](name=name, user=user) args = '' if opts is not None: lopts = ','.join(opts) args = '-o {0}'.format(lopts) if fstype: args += ' -t {0}'.format(fstype) cmd = 'mount {0} {1} {2} '.format(args, device, name) out = __salt__['cmd.run_all'](cmd, runas=user) if out['retcode']: return out['stderr'] return True def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None): ''' Attempt to remount a device, if the device is not already mounted, mount is called CLI Example: .. code-block:: bash salt '*' mount.remount /mnt/foo /dev/sdz1 True ''' force_mount = False if __grains__['os'] in ['MacOS', 'Darwin']: if opts == 'defaults': opts = 'noowners' if fstype == 'smbfs': force_mount = True if isinstance(opts, string_types): opts = opts.split(',') mnts = active() if name in mnts: # The mount point is mounted, attempt to remount it with the given data if 'remount' not in opts and __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin']: opts.append('remount') if force_mount: # We need to force the mount but first we should unmount umount(name, device, user=user) lopts = ','.join(opts) args = '-o {0}'.format(lopts) if fstype: args += ' -t {0}'.format(fstype) if __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin'] or force_mount: cmd = 'mount {0} {1} {2} '.format(args, device, name) else: cmd = 'mount -u {0} {1} {2} '.format(args, device, name) out = __salt__['cmd.run_all'](cmd, runas=user) if out['retcode']: return out['stderr'] return True # Mount a filesystem that isn't already return mount(name, device, mkmnt, fstype, opts, user=user) def umount(name, device=None, user=None): ''' Attempt to unmount a device by specifying the directory it is mounted on CLI Example: .. code-block:: bash salt '*' mount.umount /mnt/foo .. versionadded:: Lithium salt '*' mount.umount /mnt/foo /dev/xvdc1 ''' mnts = active() if name not in mnts: return "{0} does not have anything mounted".format(name) if not device: cmd = 'umount {0}'.format(name) else: cmd = 'umount {0}'.format(device) out = __salt__['cmd.run_all'](cmd, runas=user) if out['retcode']: return out['stderr'] return True def is_fuse_exec(cmd): ''' Returns true if the command passed is a fuse mountable application. CLI Example: .. code-block:: bash salt '*' mount.is_fuse_exec sshfs ''' cmd_path = _which(cmd) # No point in running ldd on a command that doesn't exist if not cmd_path: return False elif not _which('ldd'): raise CommandNotFoundError('ldd') out = __salt__['cmd.run']('ldd {0}'.format(cmd_path)) return 'libfuse' in out def swaps(): ''' Return a dict containing information on active swap CLI Example: .. code-block:: bash salt '*' mount.swaps ''' ret = {} if __grains__['os'] != 'OpenBSD': with salt.utils.fopen('/proc/swaps') as fp_: for line in fp_: if line.startswith('Filename'): continue comps = line.split() ret[comps[0]] = {'type': comps[1], 'size': comps[2], 'used': comps[3], 'priority': comps[4]} else: for line in __salt__['cmd.run_stdout']('swapctl -kl').splitlines(): if line.startswith(('Device', 'Total')): continue swap_type = "file" comps = line.split() if comps[0].startswith('/dev/'): swap_type = "partition" ret[comps[0]] = {'type': swap_type, 'size': comps[1], 'used': comps[2], 'priority': comps[5]} return ret def swapon(name, priority=None): ''' Activate a swap disk CLI Example: .. code-block:: bash salt '*' mount.swapon /root/swapfile ''' ret = {} on_ = swaps() if name in on_: ret['stats'] = on_[name] ret['new'] = False return ret cmd = 'swapon {0}'.format(name) if priority: cmd += ' -p {0}'.format(priority) __salt__['cmd.run'](cmd) on_ = swaps() if name in on_: ret['stats'] = on_[name] ret['new'] = True return ret return ret def swapoff(name): ''' Deactivate a named swap mount CLI Example: .. code-block:: bash salt '*' mount.swapoff /root/swapfile ''' on_ = swaps() if name in on_: if __grains__['os'] != 'OpenBSD': __salt__['cmd.run']('swapoff {0}'.format(name)) else: __salt__['cmd.run']('swapctl -d {0}'.format(name)) on_ = swaps() if name in on_: return False return True return None def is_mounted(name): ''' .. versionadded:: 2014.7.0 Provide information if the path is mounted CLI Example: .. code-block:: bash salt '*' mount.is_mounted /mnt/share ''' active_ = active() if name in active_: return True else: return False
30.109264
92
0.473651
2,712
25,352
4.317847
0.115782
0.016909
0.013664
0.023057
0.610845
0.568488
0.541332
0.514774
0.486422
0.454142
0
0.011916
0.407463
25,352
841
93
30.145065
0.767608
0.141093
0
0.617486
0
0.001821
0.086969
0.002358
0
0
0
0
0
1
0.041894
false
0.010929
0.014572
0
0.134791
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2004bf04417c6b520430e6ac9ec351a3c37f83
9,312
py
Python
wxpy/bot.py
daimajia/wxpy
2b56fb67b9ccb072538fd778a27a8fef8d9c93e6
[ "MIT" ]
34
2017-03-01T06:32:04.000Z
2021-11-16T12:48:46.000Z
wxpy/bot.py
daimajia/wxpy
2b56fb67b9ccb072538fd778a27a8fef8d9c93e6
[ "MIT" ]
null
null
null
wxpy/bot.py
daimajia/wxpy
2b56fb67b9ccb072538fd778a27a8fef8d9c93e6
[ "MIT" ]
17
2017-03-01T08:41:22.000Z
2021-09-16T06:25:43.000Z
import traceback from pprint import pformat from threading import Thread import itchat import logging from wxpy.chat import Chat from wxpy.chats import Chats from wxpy.friend import Friend from wxpy.group import Group from wxpy.message import MessageConfigs, Messages, Message, MessageConfig from wxpy.mp import MP from wxpy.response import ResponseError from wxpy.user import User from wxpy.utils.constants import SYSTEM from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list logger = logging.getLogger('wxpy') class Robot(object): """ 机器人对象,用于登陆和操作微信账号,涵盖大部分 Web 微信的功能 """ def __init__( self, save_path=None, console_qr=False, qr_path=None, qr_callback=None, login_callback=None, logout_callback=None ): """ :param save_path: | 用于保存或载入登陆状态的文件路径,例如: 'wxpy.pkl',为空则不尝试载入。 | 填写本参数后,可在短时间内重新载入登陆状态,避免重复扫码,失效时会重新要求登陆 :param console_qr: 在终端中显示登陆二维码,需要安装 Pillow 模块 :param qr_path: 保存二维码的路径 :param qr_callback: 获得二维码时的回调,接收参数: uuid, status, qrcode :param login_callback: 登陆时的回调,接收参数同上 :param logout_callback: 登出时的回调,接收参数同上 """ self.core = itchat.Core() itchat.instanceList.append(self) self.core.auto_login( hotReload=bool(save_path), statusStorageDir=save_path, enableCmdQR=console_qr, picDir=qr_path, qrCallback=qr_callback, loginCallback=login_callback, exitCallback=logout_callback ) self.message_configs = MessageConfigs(self) self.messages = Messages(robot=self) self.file_helper = Chat(wrap_user_name('filehelper')) self.file_helper.robot = self self.file_helper.nick_name = '文件传输助手' self.self = Chat(self.core.loginInfo['User']) self.self.robot = self self.save_path = save_path def __repr__(self): return '<{}: {}>'.format(self.__class__.__name__, self.self.name) @handle_response() def logout(self): """ 登出当前账号 """ return self.core.logout() @property def alive(self): """ 当前的登陆状态 :return: 若为登陆状态,则为 True,否则为 False """ return self.core.alive @alive.setter def alive(self, value): self.core.alive = value def dump_login_status(self, save_path=None): return self.core.dump_login_status(save_path or self.save_path) # chats def except_self(self, chats_or_dicts): """ 从聊天对象合集或用户字典列表中排除自身 :param chats_or_dicts: 聊天对象合集或用户字典列表 :return: 排除自身后的列表 """ return list(filter(lambda x: get_user_name(x) != self.self.user_name, chats_or_dicts)) def chats(self, update=False): """ 获取所有聊天对象 :param update: 是否更新 :return: 聊天对象合集 """ return Chats(self.friends(update) + self.groups(update) + self.mps(update), self) def friends(self, update=False): """ 获取所有好友 :param update: 是否更新 :return: 聊天对象合集 """ @handle_response(Friend) def do(): return self.core.get_friends(update=update) ret = do() ret.source = self return ret @handle_response(Group) def groups(self, update=False, contact_only=False): """ 获取所有群聊 :param update: 是否更新 :param contact_only: 是否限于保存为联系人的群聊 :return: 群聊合集 """ return self.core.get_chatrooms(update=update, contactOnly=contact_only) @handle_response(MP) def mps(self, update=False): """ 获取所有公众号 :param update: 是否更新 :return: 聊天对象合集 """ return self.core.get_mps(update=update) @handle_response(User) def user_details(self, user_or_users, chunk_size=50): """ 获取单个或批量获取多个用户的详细信息(地区、性别、签名等),但不可用于群聊成员 :param user_or_users: 单个或多个用户对象或 user_name :param chunk_size: 分配请求时的单批数量,目前为 50 :return: 单个或多个用户用户的详细信息 """ def chunks(): total = ensure_list(user_or_users) for i in range(0, len(total), chunk_size): yield total[i:i + chunk_size] @handle_response() def process_one_chunk(_chunk): return self.core.update_friend(userName=get_user_name(_chunk)) if isinstance(user_or_users, (list, tuple)): ret = list() for chunk in chunks(): chunk_ret = process_one_chunk(chunk) if isinstance(chunk_ret, list): ret += chunk_ret else: ret.append(chunk_ret) return ret else: return process_one_chunk(user_or_users) def search(self, name=None, **attributes): """ 在所有类型的聊天对象中进行搜索 :param name: 名称 (可以是昵称、备注等) :param attributes: 属性键值对,键可以是 sex(性别), province(省份), city(城市) 等。例如可指定 province='广东' :return: 匹配的聊天对象合集 """ return self.chats().search(name, **attributes) # add / create @handle_response() def add_friend(self, user, verify_content=''): """ 添加用户为好友 :param user: 用户对象或用户名 :param verify_content: 验证说明信息 """ return self.core.add_friend( userName=get_user_name(user), status=2, verifyContent=verify_content, autoUpdate=True ) @handle_response() def accept_friend(self, user, verify_content=''): """ 接受用户为好友 :param user: 用户对象或用户名 :param verify_content: 验证说明信息 """ # Todo: 验证好友接口可用性,并在接受好友时直接返回新好友 return self.core.add_friend( userName=get_user_name(user), status=3, verifyContent=verify_content, autoUpdate=True ) def create_group(self, users, topic=None): """ 创建一个新的群聊 :param users: 用户列表 :param topic: 群名称 :return: 若建群成功,返回一个新的群聊对象 """ @handle_response() def request(): return self.core.create_chatroom( memberList=wrap_user_name(users), topic=topic or '' ) ret = request() user_name = ret.get('ChatRoomName') if user_name: return Group(self.core.update_chatroom(userName=user_name)) else: raise ResponseError('Failed to create group:\n{}'.format(pformat(ret))) # messages def _process_message(self, msg): """ 处理接收到的消息 """ if not self.alive: return func, run_async = self.message_configs.get_func(msg) if not func: return def process(): # noinspection PyBroadException try: ret = func(msg) if ret is not None: if isinstance(ret, (tuple, list)): self.core.send( msg=str(ret[0]), toUserName=msg.chat.user_name, mediaId=ret[1] ) else: self.core.send( msg=str(ret), toUserName=msg.chat.user_name ) except: logger.warning( 'An error occurred in registered function, ' 'use `Robot().start(debug=True)` to show detailed information') logger.debug(traceback.format_exc()) if run_async: Thread(target=process).start() else: process() def register( self, chats=None, msg_types=None, except_self=True, run_async=True, enabled=True ): """ 装饰器:用于注册消息配置 :param chats: 单个或列表形式的多个聊天对象或聊天类型,为空时匹配所有聊天对象 :param msg_types: 单个或列表形式的多个消息类型,为空时匹配所有消息类型 (SYSTEM 类消息除外) :param except_self: 排除自己在手机上发送的消息 :param run_async: 异步执行配置的函数,可提高响应速度 :param enabled: 当前配置的默认开启状态,可事后动态开启或关闭 """ def register(func): self.message_configs.append(MessageConfig( robot=self, func=func, chats=chats, msg_types=msg_types, except_self=except_self, run_async=run_async, enabled=enabled )) return func return register def start(self, block=True): """ 开始监听和处理消息 :param block: 是否堵塞线程,为 False 时将在新的线程中运行 """ def listen(): logger.info('{} Auto-reply started.'.format(self)) try: while self.alive: msg = Message(self.core.msgList.get(), self) if msg.type is not SYSTEM: self.messages.append(msg) self._process_message(msg) except KeyboardInterrupt: logger.info('KeyboardInterrupt received, ending...') self.alive = False if self.core.useHotReload: self.dump_login_status() logger.info('Bye.') if block: listen() else: t = Thread(target=listen, daemon=True) t.start()
27.22807
94
0.560997
975
9,312
5.190769
0.284103
0.030034
0.027663
0.012448
0.113812
0.058091
0.036752
0.020549
0.020549
0.020549
0
0.001475
0.344609
9,312
341
95
27.307918
0.82779
0.166774
0
0.162921
0
0
0.033395
0.003821
0
0
0
0.002933
0
1
0.146067
false
0
0.08427
0.02809
0.359551
0.005618
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a206c0ba5cec93f4c2890bee22ea35305190260
1,477
py
Python
readthedocs/settings/proxito/base.py
rffontenelle/readthedocs.org
a7a9072215551156b9ddc22280cc085944eaa4b0
[ "MIT" ]
null
null
null
readthedocs/settings/proxito/base.py
rffontenelle/readthedocs.org
a7a9072215551156b9ddc22280cc085944eaa4b0
[ "MIT" ]
null
null
null
readthedocs/settings/proxito/base.py
rffontenelle/readthedocs.org
a7a9072215551156b9ddc22280cc085944eaa4b0
[ "MIT" ]
null
null
null
""" Base settings for Proxito Some of these settings will eventually be backported into the main settings file, but currently we have them to be able to run the site with the old middleware for a staged rollout of the proxito code. """ class CommunityProxitoSettingsMixin: ROOT_URLCONF = 'readthedocs.proxito.urls' USE_SUBDOMAIN = True SECURE_REFERRER_POLICY = "no-referrer-when-downgrade" # Allow cookies from cross-site requests on subdomains for now. # As 'Lax' breaks when the page is embedded in an iframe. SESSION_COOKIE_SAMESITE = None @property def DATABASES(self): # This keeps connections to the DB alive, # which reduces latency with connecting to postgres dbs = getattr(super(), 'DATABASES', {}) for db in dbs: dbs[db]['CONN_MAX_AGE'] = 86400 return dbs @property def MIDDLEWARE(self): # noqa # Use our new middleware instead of the old one classes = super().MIDDLEWARE classes = list(classes) classes.append('readthedocs.proxito.middleware.ProxitoMiddleware') middleware_to_remove = ( 'csp.middleware.CSPMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) for mw in middleware_to_remove: if mw in classes: classes.remove(mw) else: log.warning('Failed to remove middleware: %s', mw) return classes
31.425532
81
0.65606
177
1,477
5.40678
0.627119
0.025078
0.037618
0
0
0
0
0
0
0
0
0.00466
0.273527
1,477
46
82
32.108696
0.887232
0.330399
0
0.076923
0
0
0.237705
0.184426
0
0
0
0
0
1
0.076923
false
0
0
0
0.346154
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a20872ac762ad5db9d06e05df401ef72a6b24c6
69,998
py
Python
model_selection/tests/test_search.py
jessica-tu/jupyter
917e02bc29e0fa06bd8adb25fe5388ac381ec829
[ "PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
model_selection/tests/test_search.py
jessica-tu/jupyter
917e02bc29e0fa06bd8adb25fe5388ac381ec829
[ "PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
model_selection/tests/test_search.py
jessica-tu/jupyter
917e02bc29e0fa06bd8adb25fe5388ac381ec829
[ "PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) Cs = [.1, 1, 10] grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) grid_search.fit(X, y) assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) # Test that regressors do not have a classes_ attribute grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]}) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute before it's fit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute without a refit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}, refit=False) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') def test_trivial_cv_results_attr(): # Test search over a "grid" with only one point. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3) grid_search.fit(X, y) assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3) random_search.fit(X, y) assert hasattr(grid_search, "cv_results_") def test_no_refit(): # Test that GSCV can be used for model selection alone without refitting clf = MockClassifier() for scoring in [None, ['accuracy', 'precision']]: grid_search = GridSearchCV( clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3 ) grid_search.fit(X, y) assert not hasattr(grid_search, "best_estimator_") and \ hasattr(grid_search, "best_index_") and \ hasattr(grid_search, "best_params_") # Make sure the functions predict/transform etc raise meaningful # error messages for fn_name in ('predict', 'predict_proba', 'predict_log_proba', 'transform', 'inverse_transform'): assert_raise_message(NotFittedError, ('refit=False. %s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_refit_callable(): """ Test refit=callable, which adds flexibility in identifying the "best" estimator. """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_score`. """ # Fit a dummy clf with `refit=True` to get a list of keys in # clf.cv_results_. X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=True) clf.fit(X, y) # Ensure that `best_index_ != 0` for this dummy clf assert clf.best_index_ != 0 # Assert every key matches those in `cv_results` for key in clf.cv_results_.keys(): assert key in cv_results return cv_results['mean_test_score'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_refit_callable_invalid_type(): """ Test implementation catches the errors when 'best_index_' returns an invalid result. """ def refit_callable_invalid_type(cv_results): """ A dummy function tests when returned 'best_index_' is not integer. """ return None X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_invalid_type) with pytest.raises(TypeError, match='best_index_ returned is not an integer'): clf.fit(X, y) @pytest.mark.parametrize('out_bound_value', [-1, 2]) @pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV]) def test_refit_callable_out_bound(out_bound_value, search_cv): """ Test implementation catches the errors when 'best_index_' returns an out of bound result. """ def refit_callable_out_bound(cv_results): """ A dummy function tests when returned 'best_index_' is out of bounds. """ return out_bound_value X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_out_bound) with pytest.raises(IndexError, match='best_index_ index out of range'): clf.fit(X, y) def test_refit_callable_multi_metric(): """ Test refit=callable in multiple metric evaluation setting """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_prec`. """ assert 'mean_test_prec' in cv_results return cv_results['mean_test_prec'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'} clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring=scoring, refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier( check_X=check_X, check_y=check_y, methods_to_check=["fit"], ) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert hasattr(grid_search, "cv_results_") def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_X=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert hasattr(grid_search, "cv_results_") def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_y=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert hasattr(grid_search, "cv_results_") @ignore_warnings def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) def check_df(x): return isinstance(x, InputFeatureType) def check_series(x): return isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert hasattr(grid_search, "cv_results_") def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(n_samples=50, random_state=0) km = KMeans(random_state=0, init="random", n_init=1) # Multi-metric evaluation unsupervised scoring = ['adjusted_rand_score', 'fowlkes_mallows_score'] for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']: grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit) grid_search.fit(X, y) # Both ARI and FMS can find the right number :) assert grid_search.best_params_["n_clusters"] == 3 # Single metric evaluation unsupervised grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='fowlkes_mallows_score') grid_search.fit(X, y) assert grid_search.best_params_["n_clusters"] == 3 # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert grid_search.best_params_["n_clusters"] == 4 def test_gridsearch_no_predict(): # test grid-search with an estimator without predict. # slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert search.best_params_['bandwidth'] == .1 assert search.best_score_ == 42 def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert len(samples) == 10 for sample in samples: assert sample["kernel"] in ["rbf", "linear"] assert 0 <= sample["C"] <= 1 # test that repeated calls yield identical parameters param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=3, random_state=0) assert [x for x in sampler] == [x for x in sampler] if sp_version >= (0, 16): param_distributions = {"C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) assert [x for x in sampler] == [x for x in sampler] def check_cv_results_array_types(search, param_keys, score_keys): # Check if the search `cv_results`'s array are of correct types cv_results = search.cv_results_ assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) assert all(cv_results[key].dtype == object for key in param_keys) assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) assert all(cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')) scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score'] for key in scorer_keys: assert cv_results['rank_test_%s' % key].dtype == np.int32 def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand): # Test the search.cv_results_ contains all the required results assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',))) assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) def test_grid_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_grid_points = 6 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_candidates = n_grid_points search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check if score and timing are reasonable assert all(cv_results['rank_test_score'] >= 1) assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score') assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score') # Check cv_results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) # Check masking cv_results = search.cv_results_ n_candidates = len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') def test_random_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 params = [{'kernel': ['rbf'], 'C': expon(scale=10), 'gamma': expon(scale=0.1)}, {'kernel': ['poly'], 'degree': [2, 3]}] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_cand = n_search_iter search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits, param_distributions=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_cand) n_candidates = len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') @pytest.mark.parametrize( "SearchCV, specialized_params", [(GridSearchCV, {'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})] ) def test_search_default_iid(SearchCV, specialized_params): # Test the IID parameter TODO: Clearly this test does something else??? # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] common_params = {'estimator': SVC(), 'cv': cv, 'return_train_score': True} search = SearchCV(**common_params, **specialized_params) search.fit(X, y) test_cv_scores = np.array( [search.cv_results_['split%d_test_score' % s][0] for s in range(search.n_splits_)] ) test_mean = search.cv_results_['mean_test_score'][0] test_std = search.cv_results_['std_test_score'][0] train_cv_scores = np.array( [search.cv_results_['split%d_train_score' % s][0] for s in range(search.n_splits_)] ) train_mean = search.cv_results_['mean_train_score'][0] train_std = search.cv_results_['std_train_score'][0] assert search.cv_results_['param_C'][0] == 1 # scores are the same as above assert_allclose(test_cv_scores, [1, 1. / 3.]) assert_allclose(train_cv_scores, [1, 1]) # Unweighted mean/std is used assert test_mean == pytest.approx(np.mean(test_cv_scores)) assert test_std == pytest.approx(np.std(test_cv_scores)) # For the train scores, we do not take a weighted mean irrespective of # i.i.d. or not assert train_mean == pytest.approx(1) assert train_std == pytest.approx(0) def test_grid_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] grid_searches = [] for scoring in ({'accuracy': make_scorer(accuracy_score), 'recall': make_scorer(recall_score)}, 'accuracy', 'recall'): grid_search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False) grid_search.fit(X, y) grid_searches.append(grid_search) compare_cv_results_multimetric_with_single(*grid_searches) def test_random_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 # Scipy 0.12's stats dists do not accept seed, hence we use param grid params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) for refit in (True, False): random_searches = [] for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'): # If True, for multi-metric pass refit='accuracy' if refit: probability = True refit = 'accuracy' if isinstance(scoring, tuple) else refit else: probability = False clf = SVC(probability=probability, random_state=42) random_search = RandomizedSearchCV(clf, n_iter=n_search_iter, cv=n_splits, param_distributions=params, scoring=scoring, refit=refit, random_state=0) random_search.fit(X, y) random_searches.append(random_search) compare_cv_results_multimetric_with_single(*random_searches) compare_refit_methods_when_refit_with_acc( random_searches[0], random_searches[1], refit) def compare_cv_results_multimetric_with_single( search_multi, search_acc, search_rec): """Compare multi-metric cv_results with the ensemble of multiple single metric cv_results from single metric grid/random search""" assert search_multi.multimetric_ assert_array_equal(sorted(search_multi.scorer_), ('accuracy', 'recall')) cv_results_multi = search_multi.cv_results_ cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v for k, v in search_acc.cv_results_.items()} cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v for k, v in search_rec.cv_results_.items()}) # Check if score and timing are reasonable, also checks if the keys # are present assert all((np.all(cv_results_multi[k] <= 1) for k in ( 'mean_score_time', 'std_score_time', 'mean_fit_time', 'std_fit_time'))) # Compare the keys, other than time keys, among multi-metric and # single metric grid search results. np.testing.assert_equal performs a # deep nested comparison of the two cv_results dicts np.testing.assert_equal({k: v for k, v in cv_results_multi.items() if not k.endswith('_time')}, {k: v for k, v in cv_results_acc_rec.items() if not k.endswith('_time')}) def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit): """Compare refit multi-metric search methods with single metric methods""" assert search_acc.refit == refit if refit: assert search_multi.refit == 'accuracy' else: assert not search_multi.refit return # search cannot predict/score without refit X, y = make_blobs(n_samples=100, n_features=4, random_state=42) for method in ('predict', 'predict_proba', 'predict_log_proba'): assert_almost_equal(getattr(search_multi, method)(X), getattr(search_acc, method)(X)) assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y)) for key in ('best_index_', 'best_score_', 'best_params_'): assert getattr(search_multi, key) == getattr(search_acc, key) def test_search_cv_results_rank_tie_breaking(): X, y = make_blobs(n_samples=50, random_state=42) # The two C values are close enough to give similar models # which would result in a tie of their mean cv-scores param_grid = {'C': [1, 1.001, 0.001]} grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True) random_search = RandomizedSearchCV(SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True) for search in (grid_search, random_search): search.fit(X, y) cv_results = search.cv_results_ # Check tie breaking strategy - # Check that there is a tie in the mean scores between # candidates 1 and 2 alone assert_almost_equal(cv_results['mean_test_score'][0], cv_results['mean_test_score'][1]) assert_almost_equal(cv_results['mean_train_score'][0], cv_results['mean_train_score'][1]) assert not np.allclose(cv_results['mean_test_score'][1], cv_results['mean_test_score'][2]) assert not np.allclose(cv_results['mean_train_score'][1], cv_results['mean_train_score'][2]) # 'min' rank should be assigned to the tied candidates assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3]) def test_search_cv_results_none_param(): X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1] estimators = (DecisionTreeRegressor(), DecisionTreeClassifier()) est_parameters = {"random_state": [0, None]} cv = KFold() for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv, ).fit(X, y) assert_array_equal(grid_search.cv_results_['param_random_state'], [0, None]) @ignore_warnings() def test_search_cv_timing(): svc = LinearSVC(random_state=0) X = [[1, ], [2, ], [3, ], [4, ]] y = [0, 1, 1, 0] gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0) rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2) for search in (gs, rs): search.fit(X, y) for key in ['mean_fit_time', 'std_fit_time']: # NOTE The precision of time.time in windows is not high # enough for the fit/score times to be non-zero for trivial X and y assert np.all(search.cv_results_[key] >= 0) assert np.all(search.cv_results_[key] < 1) for key in ['mean_score_time', 'std_score_time']: assert search.cv_results_[key][1] >= 0 assert search.cv_results_[key][0] == 0.0 assert np.all(search.cv_results_[key] < 1) assert hasattr(search, "refit_time_") assert isinstance(search.refit_time_, float) assert search.refit_time_ >= 0 def test_grid_search_correct_score_results(): # test that correct scores are used n_splits = 3 clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] for score in ['f1', 'roc_auc']: grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits) cv_results = grid_search.fit(X, y).cv_results_ # Test scorer names result_keys = list(cv_results.keys()) expected_keys = (("mean_test_score", "rank_test_score") + tuple("split%d_test_score" % cv_i for cv_i in range(n_splits))) assert all(np.in1d(expected_keys, result_keys)) cv = StratifiedKFold(n_splits=n_splits) n_splits = grid_search.n_splits_ for candidate_i, C in enumerate(Cs): clf.set_params(C=C) cv_scores = np.array( list(grid_search.cv_results_['split%d_test_score' % s][candidate_i] for s in range(n_splits))) for i, (train, test) in enumerate(cv.split(X, y)): clf.fit(X[train], y[train]) if score == "f1": correct_score = f1_score(y[test], clf.predict(X[test])) elif score == "roc_auc": dec = clf.decision_function(X[test]) correct_score = roc_auc_score(y[test], dec) assert_almost_equal(correct_score, cv_scores[i]) # FIXME remove test_fit_grid_point as the function will be removed on 0.25 @ignore_warnings(category=FutureWarning) def test_fit_grid_point(): X, y = make_classification(random_state=0) cv = StratifiedKFold() svc = LinearSVC(random_state=0) scorer = make_scorer(accuracy_score) for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}): for train, test in cv.split(X, y): this_scores, this_params, n_test_samples = fit_grid_point( X, y, clone(svc), params, train, test, scorer, verbose=False) est = clone(svc).set_params(**params) est.fit(X[train], y[train]) expected_score = scorer(est, X[test], y[test]) # Test the return values of fit_grid_point assert_almost_equal(this_scores, expected_score) assert params == this_params assert n_test_samples == test.size # Should raise an error upon multimetric scorer assert_raise_message(ValueError, "For evaluating multiple scores, use " "sklearn.model_selection.cross_validate instead.", fit_grid_point, X, y, svc, params, train, test, {'score': scorer}, verbose=True) # FIXME remove test_fit_grid_point_deprecated as # fit_grid_point will be removed on 0.25 def test_fit_grid_point_deprecated(): X, y = make_classification(random_state=0) svc = LinearSVC(random_state=0) scorer = make_scorer(accuracy_score) msg = ("fit_grid_point is deprecated in version 0.23 " "and will be removed in version 0.25") params = {'C': 0.1} train, test = next(StratifiedKFold().split(X, y)) with pytest.warns(FutureWarning, match=msg): fit_grid_point(X, y, svc, params, train, test, scorer, verbose=False) def test_pickle(): # Test that a fit search can be pickled clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3) grid_search.fit(X, y) grid_search_pickled = pickle.loads(pickle.dumps(grid_search)) assert_array_almost_equal(grid_search.predict(X), grid_search_pickled.predict(X)) random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, n_iter=3, cv=3) random_search.fit(X, y) random_search_pickled = pickle.loads(pickle.dumps(random_search)) assert_array_almost_equal(random_search.predict(X), random_search_pickled.predict(X)) def test_grid_search_with_multioutput_data(): # Test search with multi-output estimator X, y = make_multilabel_classification(return_indicator=True, random_state=0) est_parameters = {"max_depth": [1, 2, 3, 4]} cv = KFold() estimators = [DecisionTreeRegressor(random_state=0), DecisionTreeClassifier(random_state=0)] # Test with grid search cv for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv) grid_search.fit(X, y) res_params = grid_search.cv_results_['params'] for cand_i in range(len(res_params)): est.set_params(**res_params[cand_i]) for i, (train, test) in enumerate(cv.split(X, y)): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal( correct_score, grid_search.cv_results_['split%d_test_score' % i][cand_i]) # Test with a randomized search for est in estimators: random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) random_search.fit(X, y) res_params = random_search.cv_results_['params'] for cand_i in range(len(res_params)): est.set_params(**res_params[cand_i]) for i, (train, test) in enumerate(cv.split(X, y)): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal( correct_score, random_search.cv_results_['split%d_test_score' % i][cand_i]) def test_predict_proba_disabled(): # Test predict_proba when disabled on estimator. X = np.arange(20).reshape(5, -1) y = [0, 0, 1, 1, 1] clf = SVC(probability=False) gs = GridSearchCV(clf, {}, cv=2).fit(X, y) assert not hasattr(gs, "predict_proba") def test_grid_search_allows_nans(): # Test GridSearchCV with SimpleImputer X = np.arange(20, dtype=np.float64).reshape(5, -1) X[2, :] = np.nan y = [0, 0, 1, 1, 1] p = Pipeline([ ('imputer', SimpleImputer(strategy='mean', missing_values=np.nan)), ('classifier', MockClassifier()), ]) GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y) class FailingClassifier(BaseEstimator): """Classifier that raises a ValueError on fit()""" FAILING_PARAMETER = 2 def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y=None): if self.parameter == FailingClassifier.FAILING_PARAMETER: raise ValueError("Failing classifier failed as required") def predict(self, X): return np.zeros(X.shape[0]) def score(self, X=None, Y=None): return 0. def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) assert_warns(FitFailedWarning, gs.fit, X, y) n_candidates = len(gs.cv_results_['params']) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. def get_cand_scores(i): return np.array(list(gs.cv_results_['split%d_test_score' % s][i] for s in range(gs.n_splits_))) assert all((np.all(get_cand_scores(cand_i) == 0.0) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER)) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) assert_warns(FitFailedWarning, gs.fit, X, y) n_candidates = len(gs.cv_results_['params']) assert all(np.all(np.isnan(get_cand_scores(cand_i))) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER) ranks = gs.cv_results_['rank_test_score'] # Check that succeeded estimators have lower ranks assert ranks[0] <= 2 and ranks[1] <= 2 # Check that failed estimator has the highest rank assert ranks[clf.FAILING_PARAMETER] == 3 assert gs.best_index_ != clf.FAILING_PARAMETER def test_grid_search_failing_classifier_raise(): # GridSearchCV with on_error == 'raise' raises the error X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we want to test the behaviour of the grid search part gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score='raise') # FailingClassifier issues a ValueError so this is what we look for. assert_raises(ValueError, gs.fit, X, y) def test_parameters_sampler_replacement(): # raise warning if n_iter is bigger than total parameter space params = [{'first': [0, 1], 'second': ['a', 'b', 'c']}, {'third': ['two', 'values']}] sampler = ParameterSampler(params, n_iter=9) n_iter = 9 grid_size = 8 expected_warning = ('The total space of parameters %d is smaller ' 'than n_iter=%d. Running %d iterations. For ' 'exhaustive searches, use GridSearchCV.' % (grid_size, n_iter, grid_size)) assert_warns_message(UserWarning, expected_warning, list, sampler) # degenerates to GridSearchCV if n_iter the same as grid_size sampler = ParameterSampler(params, n_iter=8) samples = list(sampler) assert len(samples) == 8 for values in ParameterGrid(params): assert values in samples # test sampling without replacement in a large grid params = {'a': range(10), 'b': range(10), 'c': range(10)} sampler = ParameterSampler(params, n_iter=99, random_state=42) samples = list(sampler) assert len(samples) == 99 hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c']) for p in samples] assert len(set(hashable_samples)) == 99 # doesn't go into infinite loops params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params_distribution, n_iter=7) samples = list(sampler) assert len(samples) == 7 def test_stochastic_gradient_loss_param(): # Make sure the predict_proba works when loss is specified # as one of the parameters in the param_grid. param_grid = { 'loss': ['log'], } X = np.arange(24).reshape(6, -1) y = [0, 0, 0, 1, 1, 1] clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'), param_grid=param_grid, cv=3) # When the estimator is not fitted, `predict_proba` is not available as the # loss is 'hinge'. assert not hasattr(clf, "predict_proba") clf.fit(X, y) clf.predict_proba(X) clf.predict_log_proba(X) # Make sure `predict_proba` is not available when setting loss=['hinge'] # in param_grid param_grid = { 'loss': ['hinge'], } clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'), param_grid=param_grid, cv=3) assert not hasattr(clf, "predict_proba") clf.fit(X, y) assert not hasattr(clf, "predict_proba") def test_search_train_scores_set_to_false(): X = np.arange(6).reshape(6, -1) y = [0, 0, 0, 1, 1, 1] clf = LinearSVC(random_state=0) gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]}, cv=3) gs.fit(X, y) def test_grid_search_cv_splits_consistency(): # Check if a one time iterable is accepted as a cv parameter. n_samples = 100 n_splits = 5 X, y = make_classification(n_samples=n_samples, random_state=0) gs = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), return_train_score=True) gs.fit(X, y) gs2 = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=KFold(n_splits=n_splits), return_train_score=True) gs2.fit(X, y) # Give generator as a cv parameter assert isinstance(KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), GeneratorType) gs3 = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), return_train_score=True) gs3.fit(X, y) gs4 = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=KFold(n_splits=n_splits, shuffle=True, random_state=0), return_train_score=True) gs4.fit(X, y) def _pop_time_keys(cv_results): for key in ('mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time'): cv_results.pop(key) return cv_results # Check if generators are supported as cv and # that the splits are consistent np.testing.assert_equal(_pop_time_keys(gs3.cv_results_), _pop_time_keys(gs4.cv_results_)) # OneTimeSplitter is a non-re-entrant cv where split can be called only # once if ``cv.split`` is called once per param setting in GridSearchCV.fit # the 2nd and 3rd parameter will not be evaluated as no train/test indices # will be generated for the 2nd and subsequent cv.split calls. # This is a check to make sure cv.split is not called once per param # setting. np.testing.assert_equal({k: v for k, v in gs.cv_results_.items() if not k.endswith('_time')}, {k: v for k, v in gs2.cv_results_.items() if not k.endswith('_time')}) # Check consistency of folds across the parameters gs = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.1, 0.2, 0.2]}, cv=KFold(n_splits=n_splits, shuffle=True), return_train_score=True) gs.fit(X, y) # As the first two param settings (C=0.1) and the next two param # settings (C=0.2) are same, the test and train scores must also be # same as long as the same train/test indices are generated for all # the cv splits, for both param setting for score_type in ('train', 'test'): per_param_scores = {} for param_i in range(4): per_param_scores[param_i] = list( gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i] for s in range(5)) assert_array_almost_equal(per_param_scores[0], per_param_scores[1]) assert_array_almost_equal(per_param_scores[2], per_param_scores[3]) def test_transform_inverse_transform_round_trip(): clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) grid_search.fit(X, y) X_round_trip = grid_search.inverse_transform(grid_search.transform(X)) assert_array_equal(X, X_round_trip) def test_custom_run_search(): def check_results(results, gscv): exp_results = gscv.cv_results_ assert sorted(results.keys()) == sorted(exp_results) for k in results: if not k.endswith('_time'): # XXX: results['params'] is a list :| results[k] = np.asanyarray(results[k]) if results[k].dtype.kind == 'O': assert_array_equal(exp_results[k], results[k], err_msg='Checking ' + k) else: assert_allclose(exp_results[k], results[k], err_msg='Checking ' + k) def fit_grid(param_grid): return GridSearchCV(clf, param_grid, return_train_score=True).fit(X, y) class CustomSearchCV(BaseSearchCV): def __init__(self, estimator, **kwargs): super().__init__(estimator, **kwargs) def _run_search(self, evaluate): results = evaluate([{'max_depth': 1}, {'max_depth': 2}]) check_results(results, fit_grid({'max_depth': [1, 2]})) results = evaluate([{'min_samples_split': 5}, {'min_samples_split': 10}]) check_results(results, fit_grid([{'max_depth': [1, 2]}, {'min_samples_split': [5, 10]}])) # Using regressor to make sure each score differs clf = DecisionTreeRegressor(random_state=0) X, y = make_classification(n_samples=100, n_informative=4, random_state=0) mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y) gscv = fit_grid([{'max_depth': [1, 2]}, {'min_samples_split': [5, 10]}]) results = mycv.cv_results_ check_results(results, gscv) for attr in dir(gscv): if (attr[0].islower() and attr[-1:] == '_' and attr not in {'cv_results_', 'best_estimator_', 'refit_time_', 'classes_'}): assert getattr(gscv, attr) == getattr(mycv, attr), \ "Attribute %s not equal" % attr def test__custom_fit_no_run_search(): class NoRunSearchSearchCV(BaseSearchCV): def __init__(self, estimator, **kwargs): super().__init__(estimator, **kwargs) def fit(self, X, y=None, groups=None, **fit_params): return self # this should not raise any exceptions NoRunSearchSearchCV(SVC()).fit(X, y) class BadSearchCV(BaseSearchCV): def __init__(self, estimator, **kwargs): super().__init__(estimator, **kwargs) with pytest.raises(NotImplementedError, match="_run_search not implemented."): # this should raise a NotImplementedError BadSearchCV(SVC()).fit(X, y) def test_empty_cv_iterator_error(): # Use global X, y # create cv cv = KFold(n_splits=3).split(X) # pop all of it, this should cause the expected ValueError [u for u in cv] # cv is empty now train_size = 100 ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) # assert that this raises an error with pytest.raises(ValueError, match='No fits were performed. ' 'Was the CV iterator empty\\? ' 'Were there no candidates\\?'): ridge.fit(X[:train_size], y[:train_size]) def test_random_search_bad_cv(): # Use global X, y class BrokenKFold(KFold): def get_n_splits(self, *args, **kw): return 1 # create bad cv cv = BrokenKFold(n_splits=3) train_size = 100 ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) # assert that this raises an error with pytest.raises(ValueError, match='cv.split and cv.get_n_splits returned ' 'inconsistent results. Expected \\d+ ' 'splits, got \\d+'): ridge.fit(X[:train_size], y[:train_size]) def test_n_features_in(): # make sure grid search and random search delegate n_features_in to the # best estimator n_features = 4 X, y = make_classification(n_features=n_features) gbdt = HistGradientBoostingClassifier() param_grid = {'max_iter': [3, 4]} gs = GridSearchCV(gbdt, param_grid) rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1) assert not hasattr(gs, 'n_features_in_') assert not hasattr(rs, 'n_features_in_') gs.fit(X, y) rs.fit(X, y) assert gs.n_features_in_ == n_features assert rs.n_features_in_ == n_features def test_search_cv__pairwise_property_delegated_to_base_estimator(): """ Test implementation of BaseSearchCV has the _pairwise property which matches the _pairwise property of its estimator. This test make sure _pairwise is delegated to the base estimator. Non-regression test for issue #13920. """ est = BaseEstimator() attr_message = "BaseSearchCV _pairwise property must match estimator" for _pairwise_setting in [True, False]: setattr(est, '_pairwise', _pairwise_setting) cv = GridSearchCV(est, {'n_neighbors': [10]}) assert _pairwise_setting == cv._pairwise, attr_message def test_search_cv__pairwise_property_equivalence_of_precomputed(): """ Test implementation of BaseSearchCV has the _pairwise property which matches the _pairwise property of its estimator. This test ensures the equivalence of 'precomputed'. Non-regression test for issue #13920. """ n_samples = 50 n_splits = 2 X, y = make_classification(n_samples=n_samples, random_state=0) grid_params = {'n_neighbors': [10]} # defaults to euclidean metric (minkowski p = 2) clf = KNeighborsClassifier() cv = GridSearchCV(clf, grid_params, cv=n_splits) cv.fit(X, y) preds_original = cv.predict(X) # precompute euclidean metric to validate _pairwise is working X_precomputed = euclidean_distances(X) clf = KNeighborsClassifier(metric='precomputed') cv = GridSearchCV(clf, grid_params, cv=n_splits) cv.fit(X_precomputed, y) preds_precomputed = cv.predict(X_precomputed) attr_message = "GridSearchCV not identical with precomputed metric" assert (preds_original == preds_precomputed).all(), attr_message @pytest.mark.parametrize( "SearchCV, param_search", [(GridSearchCV, {'a': [0.1, 0.01]}), (RandomizedSearchCV, {'a': uniform(1, 3)})] ) def test_scalar_fit_param(SearchCV, param_search): # unofficially sanctioned tolerance for scalar values in fit_params # non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/15805 class TestEstimator(BaseEstimator, ClassifierMixin): def __init__(self, a=None): self.a = a def fit(self, X, y, r=None): self.r_ = r def predict(self, X): return np.zeros(shape=(len(X))) model = SearchCV(TestEstimator(), param_search) X, y = make_classification(random_state=42) model.fit(X, y, r=42) assert model.best_estimator_.r_ == 42 @pytest.mark.parametrize( "SearchCV, param_search", [(GridSearchCV, {'alpha': [0.1, 0.01]}), (RandomizedSearchCV, {'alpha': uniform(0.01, 0.1)})] ) def test_scalar_fit_param_compat(SearchCV, param_search): # check support for scalar values in fit_params, for instance in LightGBM # that do not exactly respect the scikit-learn API contract but that we do # not want to break without an explicit deprecation cycle and API # recommendations for implementing early stopping with a user provided # validation set. non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/15805 X_train, X_valid, y_train, y_valid = train_test_split( *make_classification(random_state=42), random_state=42 ) class _FitParamClassifier(SGDClassifier): def fit(self, X, y, sample_weight=None, tuple_of_arrays=None, scalar_param=None, callable_param=None): super().fit(X, y, sample_weight=sample_weight) assert scalar_param > 0 assert callable(callable_param) # The tuple of arrays should be preserved as tuple. assert isinstance(tuple_of_arrays, tuple) assert tuple_of_arrays[0].ndim == 2 assert tuple_of_arrays[1].ndim == 1 return self def _fit_param_callable(): pass model = SearchCV( _FitParamClassifier(), param_search ) # NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which # is not the case for the following parameters. But this abuse is common in # popular third-party libraries and we should tolerate this behavior for # now and be careful not to break support for those without following # proper deprecation cycle. fit_params = { 'tuple_of_arrays': (X_valid, y_valid), 'callable_param': _fit_param_callable, 'scalar_param': 42, } model.fit(X_train, y_train, **fit_params)
38.18767
79
0.629904
9,397
69,998
4.462594
0.087581
0.029617
0.008346
0.007082
0.466627
0.388291
0.342792
0.298581
0.273471
0.249696
0
0.022237
0.258622
69,998
1,832
80
38.208515
0.785837
0.151319
0
0.311344
0
0
0.085834
0.00207
0
0
0
0.001092
0.152856
1
0.085278
false
0.001609
0.051488
0.012872
0.168946
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a20fc9b93bd3fc7e19c79190d5875b049bc7526
4,136
py
Python
build/lib/FinMesh/usgov/__init__.py
johnjdailey/FinMesh
64048b02bfec1a24de840877b38e82f4fa813d22
[ "MIT" ]
1
2020-08-14T16:09:54.000Z
2020-08-14T16:09:54.000Z
build/lib/FinMesh/usgov/__init__.py
johnjdailey/FinMesh
64048b02bfec1a24de840877b38e82f4fa813d22
[ "MIT" ]
null
null
null
build/lib/FinMesh/usgov/__init__.py
johnjdailey/FinMesh
64048b02bfec1a24de840877b38e82f4fa813d22
[ "MIT" ]
null
null
null
import os import requests import xmltodict import csv import json # # # # # # # # # # # FRED DATA BELOW # # # # # # # # # # # FRED_BASE_URL = 'https://api.stlouisfed.org/fred/' GEOFRED_BASE_URL = 'https://api.stlouisfed.org/geofred/' def append_fred_token(url): token = os.getenv('FRED_TOKEN') return f'{url}&api_key={token}' FRED_SERIES_OBS_URL = FRED_BASE_URL + 'series/observations?' def fred_series(series, file_type=None, realtime_start=None, realtime_end=None, limit=None, offset=None, sort_order=None, observation_start=None, observation_end=None, units=None, frequency=None, aggregation_method=None, output_type=None, vintage_dates=None): ## Returns time series historical data for the requested FRED data. url = FRED_SERIES_OBS_URL + f'series_id={series}' if file_type: url += f'&file_type={file_type}' if realtime_start: url += f'&realtime_start={realtime_start}' if realtime_end: url += f'&realtime_end={realtime_end}' if limit: url += f'&limit={limit}' if offset: url += f'&offset={offset}' if sort_order: url += f'&sort_order={sort_order}' if observation_start: url += f'&observation_start={observation_start}' if observation_end: url += f'&observation_end={observation_end}' if units: url += f'&units={units}' if frequency: url += f'&frequency={frequency}' if aggregation_method: url += f'&aggregation_method={aggregation_method}' if output_type: url += f'&output_type={output_type}' if vintage_dates: url += f'&vintage_dates={vintage_dates}' url = append_fred_token(url) result = requests.get(url) return result.text GEOFRED_SERIES_META_URL = GEOFRED_BASE_URL + 'series/group?' def geofred_series_meta(series_id, file_type=None): ## Returns meta data for the requested FRED data. url = GEOFRED_SERIES_META_URL + f'series_id={series_id}' if file_type: url += f'&file_type={file_type}' url = append_fred_token(url) result = requests.get(url) return result.text GEOFRED_REGIONAL_SERIES_URL = GEOFRED_BASE_URL + 'series/data?' def geofred_regional_series(series_id, file_type=None, date=None, start_date=None): ## Returns the historical, geographically organized time series data for the requested FRED data. url = GEOFRED_REGIONAL_SERIES_URL + f'series_id={series_id}' if file_type: url += f'&file_type={file_type}' if date: url += f'&date={date}' if start_date: url += f'&start_date={start_date}' url = append_fred_token(url) result = requests.get(url) return result.text # # # # # # # # # # # # # # # # # GOVERNMENT YIELD CURVE DATA # # # # # # # # # # # # # # # # # GOV_YIELD_URL = 'https://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData?$filter=month(NEW_DATE)%20eq%204%20and%20year(NEW_DATE)%20eq%202019' def get_yield(): ## Returns government treasury bond yields. Organized in Python dictionary format by bond length. # Formatting of XML to Python Dict curve = requests.get(GOV_YIELD_URL) parse_curve = xmltodict.parse(curve.content) # This is based around retrieving the n last dates or average of n days. feed = parse_curve['feed'] entry = feed['entry'] last_entry = len(entry)-1 content = entry[last_entry]['content']['m:properties'] # Dict that contains the whole yield curve so there is no need to bring in each rate. yield_curve_values = { 'date' : entry[last_entry]['content']['m:properties']['d:NEW_DATE']['#text'], '1month' : float(content['d:BC_1MONTH']['#text']), '2month' : float(content['d:BC_2MONTH']['#text']), '3month' : float(content['d:BC_3MONTH']['#text']), '6month' : float(content['d:BC_6MONTH']['#text']), '1year' : float(content['d:BC_1YEAR']['#text']), '2year' : float(content['d:BC_2YEAR']['#text']), '3year' : float(content['d:BC_3YEAR']['#text']), '5year' : float(content['d:BC_5YEAR']['#text']), '10year' : float(content['d:BC_10YEAR']['#text']), '20year' : float(content['d:BC_20YEAR']['#text']), '30year' : float(content['d:BC_30YEAR']['#text']), } return yield_curve_values
44
259
0.676499
573
4,136
4.663176
0.228621
0.02994
0.053518
0.061752
0.242141
0.203219
0.158308
0.147081
0.119386
0.119386
0
0.013392
0.169487
4,136
93
260
44.473118
0.764483
0.138781
0
0.176471
0
0.014706
0.3033
0.122525
0
0
0
0
0
1
0.073529
false
0
0.073529
0
0.220588
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a22e67655b4062b0aecbc7e8062db32e1383d10
1,268
py
Python
settings.py
Cradac/mattermost-octane-integration
6a3cb4d2e0854cbf190f66467b604e6e4344a907
[ "MIT" ]
null
null
null
settings.py
Cradac/mattermost-octane-integration
6a3cb4d2e0854cbf190f66467b604e6e4344a907
[ "MIT" ]
null
null
null
settings.py
Cradac/mattermost-octane-integration
6a3cb4d2e0854cbf190f66467b604e6e4344a907
[ "MIT" ]
null
null
null
''' This is the Settings File for the Mattermost-Octane Bridge. You can change various variables here to customize and set up the client. ''' '''----------------------Mattermost Webhook Configuration----------------------''' #URL of the webhook from mattermost. To create one go to `Main Menu -> Integrations -> Incoming Webhooks` and press `Add Incoming Webhook` mm_webhook_url = 'http://localhost:8065/hooks/yuro8xrfeffj787cj1bwc4ziue' #Override the channel to send the notifications to, use the channel name as a String mm_channel = None #Set a custom Username to display in Mattermost mm_username = 'Defect Notification' #Set a custom Profile Image for the Client mm_profileimage = 'https://i.imgur.com/7Wg3Tgs.png' #Telekom T Image #The latter two need to be enabled in the settings.json of the Mattermost server '''----------------------------Flask Configuration----------------------------''' #set external IP for the Flask Server to create a Webhook for ALM Octane #local: 127.0.0.1 / False #default external: 0.0.0.0 (will default to only available external adress) external_ip = False #default: 5000 port = 5000 #external webhook verify token can be set here, if set as `None` it will be autogenerated & changed on each startup. wh_token = None
34.27027
138
0.706625
184
1,268
4.831522
0.554348
0.008999
0.022497
0
0
0
0
0
0
0
0
0.027726
0.146688
1,268
36
139
35.222222
0.7939
0.656151
0
0
0
0
0.407843
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a23e2e394242f4eca447da628bb8ac3e7fe2807
6,323
py
Python
tools/aerial_detection.py
gfjiangly/AerialDetection
ee8a945c67c8e9ddef725900ac300d2d5a785e08
[ "Apache-2.0" ]
null
null
null
tools/aerial_detection.py
gfjiangly/AerialDetection
ee8a945c67c8e9ddef725900ac300d2d5a785e08
[ "Apache-2.0" ]
1
2021-08-28T15:48:14.000Z
2021-08-28T15:48:14.000Z
tools/aerial_detection.py
gfjiangly/AerialDetection
ee8a945c67c8e9ddef725900ac300d2d5a785e08
[ "Apache-2.0" ]
null
null
null
# -*- encoding:utf-8 -*- # @Time : 2021/1/3 15:15 # @Author : gfjiang import os.path as osp import mmcv import numpy as np import cvtools import matplotlib.pyplot as plt import cv2.cv2 as cv from functools import partial import torch import math from cvtools.utils.path import add_prefix_filename_suffix from mmdet.ops import nms from mmdet.apis import init_detector, inference_detector def draw_features(module, input, output, work_dir='./'): x = output.cpu().numpy() out_channels = list(output.shape)[1] height = int(math.sqrt(out_channels)) width = height if list(output.shape)[2] < 128: return fig = plt.figure(figsize=(32, 32)) fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05) for i in range(height * width): plt.subplot(height, width, i + 1) plt.axis('off') img = x[0, i, :, :] pmin = np.min(img) pmax = np.max(img) img = ((img - pmin) / (pmax - pmin + 0.000001))*255 # float在[0,1]之间,转换成0-255 img = img.astype(np.uint8) # 转成unit8 img = cv.applyColorMap(img, cv.COLORMAP_JET) # 生成heat map img = img[:, :, ::-1] # 注意cv2(BGR)和matplotlib(RGB)通道是相反的 plt.imshow(img) # print("{}/{}".format(i,width*height)) savename = get_image_name_for_hook(module, work_dir) fig.savefig(savename, dpi=100) fig.clf() plt.close() def get_image_name_for_hook(module, work_dir='./'): """ Generate image filename for hook function Parameters: ----------- module: module of neural network """ # os.makedirs(work_dir, exist_ok=True) module_name = str(module) base_name = module_name.split('(')[0] index = 0 image_name = '.' # '.' is surely exist, to make first loop condition True while osp.exists(image_name): index += 1 image_name = osp.join( work_dir, 'feats', '%s_%d.png' % (base_name, index)) return image_name class AerialDetectionOBB(object): def __init__(self, config, pth): self.imgs = [] self.cfg = mmcv.Config.fromfile(config) self.pth = pth print('loading model {} ...'.format(pth)) self.model = init_detector(self.cfg, self.pth, device='cuda:0') self.results = [] self.img_detected = [] # self.vis_feats((torch.nn.Conv2d, torch.nn.MaxPool2d)) def __call__(self, imgs_or_path, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): if isinstance(imgs_or_path, str): self.imgs += cvtools.get_files_list(imgs_or_path) else: self.imgs += imgs_or_path prog_bar = mmcv.ProgressBar(len(self.imgs)) for _, img in enumerate(self.imgs): self.detect(img, det_thrs=det_thrs, vis=vis, vis_thr=vis_thr, save_root=save_root) prog_bar.update() def detect(self, img, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): result = inference_detector(self.model, img) # result = self.nms(result) if isinstance(det_thrs, float): det_thrs = [det_thrs] * len(result) if vis: to_file = osp.join(save_root, osp.basename(img)) to_file = add_prefix_filename_suffix(to_file, suffix='_obb') self.vis(img, result, vis_thr=vis_thr, to_file=to_file) result = [det[det[..., -1] > det_thr] for det, det_thr in zip(result, det_thrs)] if len(result) == 0: print('detect: image {} has no object.'.format(img)) self.img_detected.append(img) self.results.append(result) return result def nms(self, result, nms_th=0.3): dets_num = [len(det_cls) for det_cls in result] result = np.vstack(result) _, ids = nms(result, nms_th) total_num = 0 nms_result = [] for num in dets_num: ids_cls = ids[np.where((total_num <= ids) & (ids < num))[0]] nms_result.append(result[ids_cls]) total_num += num return nms_result def vis(self, img, bbox_result, vis_thr=0.5, to_file='vis.jpg'): bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) inds = np.where(bboxes[:, -1] > vis_thr)[0] bboxes = bboxes[inds] labels = labels[inds] texts = [self.model.CLASSES[index]+'|'+str(round(bbox[-1], 2)) for index, bbox in zip(labels, bboxes)] img = cvtools.draw_boxes_texts( img, bboxes[:, :-1], box_format='polygon', line_width=2) cvtools.imwrite(img, to_file) def vis_feats(self, modules_for_plot): h, w = self.cfg.data.train.img_scale for name, module in self.model.named_modules(): if isinstance(module, modules_for_plot): draw_features_func = partial( draw_features, work_dir=self.cfg.work_dir) module.register_forward_hook(draw_features_func) def save_results(self, save): str_results = '' for i, img in enumerate(self.img_detected): result = self.results[i] img = osp.basename(img) for cls_index, dets in enumerate(result): cls = self.model.CLASSES[cls_index] for box in dets: bbox_str = ','.join(map(str, map(int, box[:4]))) str_results += ' '.join([img, cls, bbox_str]) + '\n' with open(save, 'w') as f: f.write(str_results) if __name__ == '__main__': config_file = 'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2.py' pth_file = 'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/epoch_12.pth' detector = AerialDetectionOBB(config_file, pth_file) detector('/media/data/DOTA/crop/P2701_2926_1597_3949_2620.png', vis=True, save_root='work_dirs/attention_vis/') detector.save_results('work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/detect_result.txt')
36.33908
107
0.591966
857
6,323
4.148191
0.305718
0.015752
0.011252
0.006751
0.076512
0.076512
0.076512
0.076512
0.058509
0.046695
0
0.029509
0.281828
6,323
173
108
36.549133
0.753358
0.071643
0
0.055944
0
0
0.069285
0.050077
0
0
0
0
0
1
0.062937
false
0
0.083916
0
0.181818
0.013986
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a248f7412b4e5841bfb0f9c54b8c6e82ae3813b
19,715
py
Python
tradingAPI/low_level.py
federico123579/Trading212-API
0fab20b71a2348e72bbe76071b81f3692128851f
[ "MIT" ]
44
2017-10-23T19:17:20.000Z
2021-09-06T17:01:49.000Z
tradingAPI/low_level.py
federico123579/Trading212-API
0fab20b71a2348e72bbe76071b81f3692128851f
[ "MIT" ]
7
2017-09-05T09:51:16.000Z
2020-05-17T11:23:27.000Z
tradingAPI/low_level.py
federico123579/Trading212-API
0fab20b71a2348e72bbe76071b81f3692128851f
[ "MIT" ]
18
2017-11-18T11:55:58.000Z
2021-04-11T14:23:12.000Z
# -*- coding: utf-8 -*- """ tradingAPI.low_level ~~~~~~~~~~~~~~ This module provides the low level functions with the service. """ import time import re from datetime import datetime from pyvirtualdisplay import Display from bs4 import BeautifulSoup from splinter import Browser from .glob import Glob from .links import path from .utils import num, expect, get_pip # exceptions from tradingAPI import exceptions import selenium.common.exceptions # logging import logging logger = logging.getLogger('tradingAPI.low_level') class Stock(object): """base class for stocks""" def __init__(self, product): self.product = product self.market = True self.records = [] def new_rec(self, rec): """add a record""" self.records.append(rec) return self.records class Movement(object): """class-storing movement""" def __init__(self, product, quantity, mode, price): self.product = product self.quantity = quantity self.mode = mode self.price = price class PurePosition(object): """class-storing position""" def __init__(self, product, quantity, mode, price): self.product = product self.quantity = quantity self.mode = mode self.price = price def __repr__(self): return ' - '.join([str(self.product), str(self.quantity), str(self.mode), str(self.price)]) class LowLevelAPI(object): """low level api to interface with the service""" def __init__(self, brow="firefox"): self.brow_name = brow self.positions = [] self.movements = [] self.stocks = [] # init globals Glob() def launch(self): """launch browser and virtual display, first of all to be launched""" try: # init virtual Display self.vbro = Display() self.vbro.start() logger.debug("virtual display launched") except Exception: raise exceptions.VBroException() try: self.browser = Browser(self.brow_name) logger.debug(f"browser {self.brow_name} launched") except Exception: raise exceptions.BrowserException( self.brow_name, "failed to launch") return True def css(self, css_path, dom=None): """css find function abbreviation""" if dom is None: dom = self.browser return expect(dom.find_by_css, args=[css_path]) def css1(self, css_path, dom=None): """return the first value of self.css""" if dom is None: dom = self.browser def _css1(path, domm): """virtual local func""" return self.css(path, domm)[0] return expect(_css1, args=[css_path, dom]) def search_name(self, name, dom=None): """name find function abbreviation""" if dom is None: dom = self.browser return expect(dom.find_by_name, args=[name]) def xpath(self, xpath, dom=None): """xpath find function abbreviation""" if dom is None: dom = self.browser return expect(dom.find_by_xpath, args=[xpath]) def elCss(self, css_path, dom=None): """check if element is present by css""" if dom is None: dom = self.browser return expect(dom.is_element_present_by_css, args=[css_path]) def elXpath(self, xpath, dom=None): """check if element is present by css""" if dom is None: dom = self.browser return expect(dom.is_element_present_by_xpath, args=[xpath]) def login(self, username, password, mode="demo"): """login function""" url = "https://trading212.com/it/login" try: logger.debug(f"visiting %s" % url) self.browser.visit(url) logger.debug(f"connected to %s" % url) except selenium.common.exceptions.WebDriverException: logger.critical("connection timed out") raise try: self.search_name("login[username]").fill(username) self.search_name("login[password]").fill(password) self.css1(path['log']).click() # define a timeout for logging in timeout = time.time() + 30 while not self.elCss(path['logo']): if time.time() > timeout: logger.critical("login failed") raise CredentialsException(username) time.sleep(1) logger.info(f"logged in as {username}") # check if it's a weekend if mode == "demo" and datetime.now().isoweekday() in range(5, 8): timeout = time.time() + 10 while not self.elCss(path['alert-box']): if time.time() > timeout: logger.warning("weekend trading alert-box not closed") break if self.elCss(path['alert-box']): self.css1(path['alert-box']).click() logger.debug("weekend trading alert-box closed") except Exception as e: logger.critical("login failed") raise exceptions.BaseExc(e) return True def logout(self): """logout func (quit browser)""" try: self.browser.quit() except Exception: raise exceptions.BrowserException(self.brow_name, "not started") return False self.vbro.stop() logger.info("logged out") return True def get_bottom_info(self, info): accepted_values = { 'free_funds': 'equity-free', 'account_value': 'equity-total', 'live_result': 'equity-ppl', 'used_margin': 'equity-margin'} try: info_label = accepted_values[info] val = self.css1("div#%s span.equity-item-value" % info_label).text return num(val) except KeyError as e: raise exceptions.BaseExc(e) def get_price(self, name): soup = BeautifulSoup( self.css1("div.scrollable-area-content").html, "html.parser") for product in soup.select("div.tradebox"): fullname = product.select("span.instrument-name")[0].text.lower() if name.lower() in fullname: mark_closed_list = [x for x in product.select( "div.quantity-list-input-wrapper") if x.select( "div.placeholder")[0].text.lower().find("close") != -1] if mark_closed_list: sell_price = product.select("div.tradebox-price-sell")[0]\ .text return float(sell_price) else: return False class MovementWindow(object): """add movement window""" def __init__(self, api, product): self.api = api self.product = product self.state = 'initialized' self.insfu = False def open(self, name_counter=None): """open the window""" if self.api.css1(path['add-mov']).visible: self.api.css1(path['add-mov']).click() else: self.api.css1('span.dataTable-no-data-action').click() logger.debug("opened window") self.api.css1(path['search-box']).fill(self.product) if self.get_result(0) is None: self.api.css1(path['close']).click() raise exceptions.ProductNotFound(self.product) result, product = self.search_res(self.product, name_counter) result.click() if self.api.elCss("div.widget_message"): self.decode(self.api.css1("div.widget_message")) self.product = product self.state = 'open' def _check_open(self): if self.state == 'open': return True else: raise exceptions.WindowException() def close(self): """close a movement""" self._check_open() self.api.css1(path['close']).click() self.state = 'closed' logger.debug("closed window") def confirm(self): """confirm the movement""" self._check_open() self.get_price() self.api.css1(path['confirm-btn']).click() widg = self.api.css("div.widget_message") if widg: self.decode(widg[0]) raise exceptions.WidgetException(widg) if all(x for x in ['quantity', 'mode'] if hasattr(self, x)): self.api.movements.append(Movement( self.product, self.quantity, self.mode, self.price)) logger.debug("%s movement appended to the list" % self.product) self.state = 'conclused' logger.debug("confirmed movement") def search_res(self, res, check_counter=None): """search for a res""" logger.debug("searching result") result = self.get_result(0) name = self.get_research_name(result) x = 0 while not self.check_name(res, name, counter=check_counter): name = self.get_research_name(self.get_result(x)) if name is None: self.api.css1(path['close']).click() raise exceptions.ProductNotFound(res) logger.debug(name) if self.check_name(res, name, counter=check_counter): return self.get_result(x) x += 1 logger.debug("found product at position %d" % (x + 1)) return result, name def check_name(self, name, string, counter=None): """if both in string return False""" name = name.lower() string = string.lower() if counter is None: if name in string: return True else: return False counter = counter.lower() if name in string and counter in string: logger.debug("check_name: counter found in string") return False elif name in string and counter not in string: return True else: return False def get_research_name(self, res): """return result name""" if res is None: return None return self.api.css1("span.instrument-name", res).text def get_result(self, pos): """get pos result, where 0 is first""" evalxpath = path['res'] + f"[{pos + 1}]" try: res = self.api.xpath(evalxpath)[0] return res except Exception: return None def set_limit(self, category, mode, value): """set limit in movement window""" self._check_open() if (mode not in ["unit", "value"] or category not in ["gain", "loss", "both"]): raise ValueError() if not hasattr(self, 'stop_limit'): self.stop_limit = {'gain': {}, 'loss': {}} logger.debug("initialized stop_limit") if category == 'gain': self.api.xpath( path['limit-gain-%s' % mode])[0].fill(str(value)) elif category == 'loss': self.api.xpath( path['limit-loss-%s' % mode])[0].fill(str(value)) if category != 'both': self.stop_limit[category]['mode'] = mode self.stop_limit[category]['value'] = value elif category == 'both': self.api.xpath( path['limit-gain-%s' % mode])[0].fill(str(value)) self.api.xpath( path['limit-loss-%s' % mode])[0].fill(str(value)) for cat in ['gain', 'loss']: self.stop_limit[cat]['mode'] = mode self.stop_limit[cat]['value'] = value logger.debug("set limit") def decode(self, message): """decode text pop-up""" title = self.api.css1("div.title", message).text text = self.api.css1("div.text", message).text if title == "Insufficient Funds": self.insfu = True elif title == "Maximum Quantity Limit": raise exceptions.MaxQuantLimit(num(text)) elif title == "Minimum Quantity Limit": raise exceptions.MinQuantLimit(num(text)) logger.debug("decoded message") def decode_update(self, message, value, mult=0.1): """decode and update the value""" try: msg_text = self.api.css1("div.text", message).text return num(msg_text) except Exception: if msg_text.lower().find("higher") != -1: value += value * mult return value else: self.decode(message) return None def get_mov_margin(self): """get the margin of the movement""" self._check_open() return num(self.api.css1("span.cfd-order-info-item-value").text) def set_mode(self, mode): """set mode (buy or sell)""" self._check_open() if mode not in ["buy", "sell"]: raise ValueError() self.api.css1(path[mode + '-btn']).click() self.mode = mode logger.debug("mode set") def get_quantity(self): """gte current quantity""" self._check_open() quant = int(num(self.api.css1(path['quantity']).value)) self.quantity = quant return quant def set_quantity(self, quant): """set quantity""" self._check_open() self.api.css1(path['quantity']).fill(str(int(quant))) self.quantity = quant logger.debug("quantity set") def get_price(self, mode='buy'): """get current price""" if mode not in ['buy', 'sell']: raise ValueError() self._check_open() price = num(self.api.css1( "div.orderdialog div.tradebox-price-%s" % mode).text) self.price = price return price def get_unit_value(self): """get unit value of stock based on margin, memoized""" # find in the collection try: unit_value = Glob().theCollector.collection['unit_value'] unit_value_res = unit_value[self.product] logger.debug("unit_value found in the collection") return unit_value_res except KeyError: logger.debug("unit_value not found in the collection") pip = get_pip(mov=self) quant = 1 / pip if hasattr(self, 'quantity'): old_quant == self.quantity self.set_quantity(quant) # update the site time.sleep(0.5) margin = self.get_mov_margin() logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}") if 'old_quant' in locals(): self.set_quantity(old_quant) unit_val = margin / quant self.unit_value = unit_val Glob().unit_valueHandler.add_val({self.product: unit_val}) return unit_val def new_mov(self, name): """factory method pattern""" return self.MovementWindow(self, name) class Position(PurePosition): """position object""" def __init__(self, api, html_div): """initialized from div""" self.api = api if isinstance(html_div, type('')): self.soup_data = BeautifulSoup(html_div, 'html.parser') else: self.soup_data = html_div self.product = self.soup_data.select("td.name")[0].text self.quantity = num(self.soup_data.select("td.quantity")[0].text) if ("direction-label-buy" in self.soup_data.select("td.direction")[0].span['class']): self.mode = 'buy' else: self.mode = 'sell' self.price = num(self.soup_data.select("td.averagePrice")[0].text) self.margin = num(self.soup_data.select("td.margin")[0].text) self.id = self.find_id() def update(self, soup): """update the soup""" self.soup_data = soup return soup def find_id(self): """find pos ID with with given data""" pos_id = self.soup_data['id'] self.id = pos_id return pos_id @property def close_tag(self): """obtain close tag""" return f"#{self.id} div.close-icon" def close(self): """close position via tag""" self.api.css1(self.close_tag).click() try: self.api.xpath(path['ok_but'])[0].click() except selenium.common.exceptions.ElementNotInteractableException: if (self.api.css1('.widget_message div.title').text == 'Market Closed'): logger.error("market closed, position can't be closed") raise exceptions.MarketClosed() raise exceptions.WidgetException( self.api.css1('.widget_message div.text').text) # wait until it's been closed # set a timeout timeout = time.time() + 10 while self.api.elCss(self.close_tag): time.sleep(0.1) if time.time() > timeout: raise TimeoutError("failed to close pos %s" % self.id) logger.debug("closed pos %s" % self.id) def get_gain(self): """get current profit""" gain = num(self.soup_data.select("td.ppl")[0].text) self.gain = gain return gain def bind_mov(self): """bind the corresponding movement""" logger = logging.getLogger("tradingAPI.low_level.bind_mov") mov_list = [x for x in self.api.movements if x.product == self.product and x.quantity == self.quantity and x.mode == self.mode] if not mov_list: logger.debug("fail: mov not found") return None else: logger.debug("success: found movement") for x in mov_list: # find approximate price max_roof = self.price + self.price * 0.01 min_roof = self.price - self.price * 0.01 if min_roof < x.price < max_roof: logger.debug("success: price corresponding") # bind mov self.mov = x return x else: logger.debug("fail: price %f not corresponding to %f" % (self.price, x.price)) continue # if nothing, return None return None def new_pos(self, html_div): """factory method pattern""" pos = self.Position(self, html_div) pos.bind_mov() self.positions.append(pos) return pos
37.058271
79
0.526401
2,198
19,715
4.626934
0.150136
0.024779
0.022714
0.014749
0.24061
0.17414
0.143461
0.122911
0.094592
0.087316
0
0.006413
0.359371
19,715
531
80
37.12806
0.798812
0.073447
0
0.252404
0
0
0.109171
0.011754
0
0
0
0
0
1
0.108173
false
0.004808
0.028846
0.002404
0.259615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a258262e455109304caf1e67879b046459ff1bf
5,066
py
Python
aws-regions.py
groorj/cloud-regions
f085491c71440d99000ad29a885e6090dfc9332a
[ "MIT" ]
null
null
null
aws-regions.py
groorj/cloud-regions
f085491c71440d99000ad29a885e6090dfc9332a
[ "MIT" ]
1
2021-07-22T01:25:14.000Z
2021-07-22T17:29:09.000Z
aws-regions.py
groorj/cloud-regions
f085491c71440d99000ad29a885e6090dfc9332a
[ "MIT" ]
null
null
null
import json import logging import os import inspect import urllib import urllib.request from urllib.error import HTTPError # logger logger = logging.getLogger() logger_level = logging.getLevelName(os.environ['LOGGER_LEVEL']) logger.setLevel(logger_level) # validate access def validate_access(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) logger.debug("RESTRICTED_ACCESS_ENABLED: [%s]", os.environ['RESTRICTED_ACCESS_ENABLED']) error_message = "You are not allowed, get out!" if os.environ['RESTRICTED_ACCESS_ENABLED'] == 'true': logger.info("Restricted access is enabled") logger.info("Value for header [%s] is: [%s]", os.environ['RESTRICTED_ACCESS_HTTP_HEADER'], event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']]) if event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']] != os.environ['RESTRICTED_ACCESS_SECRET']: logger.info("Key provided is not valid") logger.debug("Error: [%s]", error_message) http_code = 403 raise ValueError(http_code, error_message) else: logger.info("Key provided is valid") else: logger.info("Restricted access is NOT enabled") # create response def create_response_new(status_code, message_body): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return { 'statusCode': str(status_code), 'body': json.dumps(message_body), 'headers': { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' }, } # download json file def get_json(): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) try: response = urllib.request.urlopen(os.environ['AWS_REGIONS_JSON_URL']) except HTTPError as err: # catch HTTP error logger.debug("HTTP error: [%s]", err) raise json_data = json.loads(response.read()) return json_data # entry point -> return region info def get_region_info(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return_info_final = {} # validate the access to this resource try: validate_access(event, context) except ValueError as err: return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] } return create_response_new(err.args[0], return_info_final) # get region info region_code = event['pathParameters']['region_code'] logger.debug("region_code: [%s]", region_code) try: json_data = get_json() except HTTPError as err: # http_code = err.code http_code = 500 return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code } return create_response_new(http_code, return_info_final) # logger.debug("json_data: [%s]", json_data) # logger.debug("type(json_data): [%s]", type(json_data)) for element in json_data['data']: # logger.debug("code: [%s] && region_code: [%s]", element['code'], region_code) if element['code'] == region_code: logger.info("region_code found") http_code = 200 return_info_final['request'] = { "request_status": "Success" } return_info_final['info'] = json_data['info'] return_info_final['data'] = element break else: logger.info("region_code NOT found") return_info = "Region code NOT found." http_code = 404 return_info_final['request'] = { "request_status": "Fail", "error_message": "Region code NOT found.", "http_error_code": http_code } return create_response_new(http_code, return_info_final) # entry point -> return region info def get_all_regions_info(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return_info_final = {} # validate the access to this resource try: validate_access(event, context) except ValueError as err: return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] } return create_response_new(err.args[0], return_info_final) # get regions info try: json_data = get_json() except HTTPError as err: # http_code = err.code http_code = 500 return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code } return create_response_new(http_code, return_info_final) logger.debug("json_data: [%s]", json_data) http_code = 200 return_info_final['request'] = { "request_status": "Success" } return_info_final['info'] = json_data['info'] return_info_final['data'] = json_data['data'] return create_response_new(http_code, return_info_final) # End;
41.867769
161
0.66443
640
5,066
5.004688
0.171875
0.062441
0.088979
0.04808
0.637527
0.563222
0.552295
0.532313
0.502966
0.472682
0
0.005969
0.206277
5,066
121
162
41.867769
0.790599
0.09317
0
0.442105
0
0
0.257811
0.046756
0
0
0
0
0
1
0.052632
false
0
0.073684
0
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a267a4563f9753a8ce7cda07a22ac19aca67d1a
10,157
py
Python
src/models/encoder.py
guowenying111/SEKE
a913a19090eb690c3188036795559210a5262f2b
[ "Apache-2.0" ]
null
null
null
src/models/encoder.py
guowenying111/SEKE
a913a19090eb690c3188036795559210a5262f2b
[ "Apache-2.0" ]
null
null
null
src/models/encoder.py
guowenying111/SEKE
a913a19090eb690c3188036795559210a5262f2b
[ "Apache-2.0" ]
null
null
null
import math import torch import torch.nn as nn from models.neural import MultiHeadedAttention, PositionwiseFeedForward from models.rnn import LayerNormLSTM class Classifier(nn.Module): def __init__(self, hidden_size): super(Classifier, self).__init__() self.linear1 = nn.Linear(hidden_size, 1) self.sigmoid = nn.Sigmoid() def forward(self, x, mask_cls): h = self.linear1(x).squeeze(-1) sent_scores = self.sigmoid(h) * mask_cls.float() return sent_scores class PositionalEncoding(nn.Module): def __init__(self, dropout, dim, max_len=5000): pe = torch.zeros(max_len, dim) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))) pe[:, 0::2] = torch.sin(position.float() * div_term) pe[:, 1::2] = torch.cos(position.float() * div_term) pe = pe.unsqueeze(0) super(PositionalEncoding, self).__init__() self.register_buffer('pe', pe) self.dropout = nn.Dropout(p=dropout) self.dim = dim def forward(self, emb, step=None): emb = emb * math.sqrt(self.dim) if (step): emb = emb + self.pe[:, step][:, None, :] else: emb = emb + self.pe[:, :emb.size(1)] emb = self.dropout(emb) return emb def get_emb(self, emb): return self.pe[:, :emb.size(1)] class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, heads, d_ff, dropout): super(TransformerEncoderLayer, self).__init__() self.self_attn = MultiHeadedAttention( heads, d_model, dropout=dropout) self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) self.dropout = nn.Dropout(dropout) def forward(self, iter, query, inputs, mask): if (iter != 0): input_norm = self.layer_norm(inputs) else: input_norm = inputs mask = mask.unsqueeze(1) context = self.self_attn(input_norm, input_norm, input_norm, mask=mask) out = self.dropout(context) + inputs return self.feed_forward(out) class TransformerInterEncoder(nn.Module): def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0): super(TransformerInterEncoder, self).__init__() self.d_model = d_model self.num_inter_layers = num_inter_layers self.pos_emb = PositionalEncoding(dropout, d_model) self.transformer_inter = nn.ModuleList( [TransformerEncoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_inter_layers)]) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) self.wo = nn.Linear(d_model, 1, bias=True) self.sigmoid = nn.Sigmoid() def forward(self, top_vecs, mask): """ See :obj:`EncoderBase.forward()`""" batch_size, n_sents = top_vecs.size(0), top_vecs.size(1) pos_emb = self.pos_emb.pe[:, :n_sents] x = top_vecs * mask[:, :, None].float() x = x + pos_emb for i in range(self.num_inter_layers): x = self.transformer_inter[i](i, x, x, ~mask) # all_sents * max_tokens * dim x = self.layer_norm(x) sent_scores = self.sigmoid(self.wo(x)) sent_scores = sent_scores.squeeze(-1) * mask.float() return sent_scores class GRUEncoder_attn(nn.Module): def __init__(self,bidirectional, num_layers, input_size, hidden_size,dropout=0.0): super(GRUEncoder_attn,self).__init__() class RNNEncoder_attn(nn.Module): def __init__(self, bidirectional, num_layers, input_size, hidden_size, dropout=0.0): super(RNNEncoder_attn, self).__init__() num_directions = 2 if bidirectional else 1 assert hidden_size % num_directions == 0 hidden_size = hidden_size // num_directions self.relu = nn.ReLU() self.rnn = LayerNormLSTM( input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, bidirectional=bidirectional) self.wo = nn.Linear(num_directions * hidden_size, 1, bias=True) self.dropout = nn.Dropout(dropout) self.softmax = nn.Softmax() print('this is dropout',dropout) def forward(self, x, mask): """See :func:`EncoderBase.forward()`""" batch, layer, seq, hidden = x.size() x1=x.contiguous().view(batch * layer, -1, hidden) x1 = torch.transpose(x1, 1, 0) memory_bank, _ = self.rnn(x1) memory_bank = self.dropout(memory_bank) + x1 memory_bank = torch.transpose(memory_bank, 1, 0) # sent_scores = self.softmax(self.relu(self.wo(memory_bank)).squeeze(dim=-1)).unsqueeze(-1) sent_scores = self.softmax(self.relu(self.wo(memory_bank[:,-1,:])).squeeze(dim=-1).view(-1,layer)).unsqueeze(-1) x=x.transpose(1,2) sent_vec = torch.matmul(sent_scores.transpose(1,2).unsqueeze(dim = 1).expand(batch,seq,1,layer),x) return sent_vec.squeeze(dim = 2) class TransformerDecoderLayer(nn.Module): def __init__(self, d_model, heads, d_ff, dropout): super(TransformerDecoderLayer, self).__init__() self.self_attn = MultiHeadedAttention( heads, d_model, dropout=dropout) self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) def forward(self, iter, ent_enc, inputs, self_attn_mask=None,context_attn_mask=None): context = self.self_attn(inputs, inputs, inputs, mask=self_attn_mask) dec_output = self.self_attn( ent_enc, ent_enc, context, mask=context_attn_mask) dec_output = self.feed_forward(dec_output) return dec_output class TransformerInterDecoder(nn.Module): def __init__(self, d_model, d_ff, heads, dropout, d_hidden, num_inter_layers=0): super(TransformerInterDecoder, self).__init__() self.d_model = d_model self.num_inter_layers = num_inter_layers self.pos_emb = PositionalEncoding(dropout, d_model) self.transformer_inter = nn.ModuleList( [TransformerDecoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_inter_layers)]) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) self.wo = nn.Linear(d_model, d_hidden , bias=True) self.wi = nn.Linear(d_model, d_hidden, bias=True) self.v = nn.Linear(d_hidden, 1, bias=True) self.LR = nn.LeakyReLU() self.softmax = nn.Softmax(dim=-1) def forward(self, top_vecs, inputs, mask, label_mask=None): """ See :obj:`EncoderBase.forward()`""" n_out = inputs.size(1) pos_emb = self.pos_emb.pe[:, :n_out] seq_mask=subsequent_mask(inputs) self_attn_mask = torch.gt((~label_mask.unsqueeze(1).expand(-1, n_out, -1) + seq_mask), 0) inputs=inputs+pos_emb for i in range(self.num_inter_layers): inputs = self.transformer_inter[i](i, top_vecs, inputs,self_attn_mask,~ mask.unsqueeze(1).expand(-1, n_out,-1)) scores=self.v(self.LR( self.wo(inputs.unsqueeze(2)).expand(-1, -1, top_vecs.size(1), -1) + self.wi(top_vecs).unsqueeze( 1))).squeeze(-1) sent_scores = self.softmax(scores) return sent_scores class RNNEncoder(nn.Module): def __init__(self, bidirectional, num_layers, input_size, hidden_size, dropout=0.0): super(RNNEncoder, self).__init__() num_directions = 2 if bidirectional else 1 assert hidden_size % num_directions == 0 hidden_size = hidden_size // num_directions self.rnn = LayerNormLSTM( input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, bidirectional=bidirectional) self.wo = nn.Linear(num_directions * hidden_size, 1, bias=True) self.dropout = nn.Dropout(dropout) self.sigmoid = nn.Sigmoid() def forward(self, x, mask): """See :func:`EncoderBase.forward()`""" x = torch.transpose(x, 1, 0) memory_bank, _ = self.rnn(x) memory_bank = self.dropout(memory_bank) + x memory_bank = torch.transpose(memory_bank, 1, 0) sent_scores = self.sigmoid(self.wo(memory_bank)) sent_scores = sent_scores.squeeze(-1) * mask.float() return sent_scores class GCN(nn.Module): def __init__(self,in_channel,out_channel,hidden_dim,drop): super(GCN, self).__init__() self.in_channel=in_channel self.out_channel=out_channel self.hidden_dim=hidden_dim self.dropout = nn.Dropout(p=drop) self.gcn_x_11=GCNConv(self.in_channel,self.hidden_dim) self.gcn_x_12=GCNConv(self.hidden_dim,self.out_channel)#No.1-*2*2 # self.gcn_x_21=GCNConv(self.in_channel,self.hidden_dim) # self.gcn_x_22=GCNConv(self.hidden_dim,self.out_channel)#No.2-*2 # self.gcn_mix=GCNConv(self.hidden_dim*2,self.hidden_dim)#No.2-*2 self.relu=nn.ReLU(inplace=True) def forward(self, x_1, edge_index_1, edge_index_2=None,edge_weight_1=None,edge_weight_2=None): syn=self.gcn_x_11(x_1, edge_index_1, edge_weight_1) syn=self.relu(syn) syn=self.dropout(syn) syn = self.gcn_x_12(syn, edge_index_1, edge_weight_1) syn = self.relu(syn) syn = self.dropout(syn) # x2 = self.gcn_x_21(x_1, edge_index_2, edge_weight_2) # x2 = self.relu(x2) # x2 = self.dropout(x2) # mix = self.gcn_mix(torch.cat((syn,x2),-1), edge_index_2, edge_weight_2) # x2 = self.gcn_x_22(mix, edge_index_2, edge_weight_2) # syn=self.gcn_x_12(mix, edge_index_1, edge_weight_1) # syn=self.relu(syn) # syn=self.dropout(syn) # x2 = self.relu(x2) # x2 = self.dropout(x2) return syn
39.065385
123
0.632962
1,389
10,157
4.362851
0.115191
0.022772
0.018152
0.024752
0.592409
0.513201
0.483663
0.478053
0.45066
0.40132
0
0.020443
0.243871
10,157
259
124
39.216216
0.76862
0.07837
0
0.360825
0
0
0.001824
0
0
0
0
0
0.010309
1
0.103093
false
0
0.025773
0.005155
0.231959
0.005155
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a27086b8164b7eb22322aad33d91e3b2ba51e9e
974
py
Python
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/dropbox/views.py
DemarcusL/django_wiki_lab
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
[ "MIT" ]
6,342
2015-01-01T07:40:30.000Z
2022-03-31T04:18:30.000Z
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/dropbox/views.py
DemarcusL/django_wiki_lab
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
[ "MIT" ]
2,198
2015-01-02T15:17:45.000Z
2022-03-28T10:20:43.000Z
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/dropbox/views.py
DemarcusL/django_wiki_lab
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
[ "MIT" ]
2,928
2015-01-01T10:44:13.000Z
2022-03-31T03:20:16.000Z
import requests from allauth.socialaccount.providers.oauth2.views import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import DropboxOAuth2Provider class DropboxOAuth2Adapter(OAuth2Adapter): provider_id = DropboxOAuth2Provider.id access_token_url = "https://api.dropbox.com/oauth2/token" authorize_url = "https://www.dropbox.com/oauth2/authorize" profile_url = "https://api.dropbox.com/2/users/get_current_account" redirect_uri_protocol = "https" def complete_login(self, request, app, token, **kwargs): response = requests.post( self.profile_url, headers={"Authorization": "Bearer %s" % (token.token,)}, ) response.raise_for_status() return self.get_provider().sociallogin_from_response(request, response.json()) oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter) oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
32.466667
86
0.737166
101
974
6.910891
0.564356
0.034384
0.031519
0.051576
0.060172
0
0
0
0
0
0
0.018337
0.160164
974
29
87
33.586207
0.834963
0
0
0
0
0
0.158111
0
0
0
0
0
0
1
0.045455
false
0
0.136364
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a27be0083c13f36f7583420125751ea6216ec63
1,599
py
Python
source/conf.py
Tatsh/upkeep
7fa99ff54104e3dec15d611eb174910337cf1870
[ "MIT" ]
3
2019-04-24T10:17:00.000Z
2020-03-11T06:18:42.000Z
source/conf.py
Tatsh/pezu
d5264b61a7113783ea29388180c16126cf185bdd
[ "MIT" ]
4
2020-04-27T19:56:29.000Z
2021-02-11T05:44:22.000Z
source/conf.py
Tatsh/pezu
d5264b61a7113783ea29388180c16126cf185bdd
[ "MIT" ]
null
null
null
# SPDX-License-Identifier: MIT # pylint: disable=redefined-builtin,invalid-name """See https://www.sphinx-doc.org/en/master/usage/configuration.html""" from typing import Sequence import os import sys # region Path setup sys.path.insert(0, os.path.abspath('..')) # endregion # region Project information project = 'Upkeep' copyright = '2020, Andrew Udvare' author = 'Andrew Udvare' # The short X.Y version version = '1.2.7' # The full version, including alpha/beta/rc tags release = f'v{version}' # endregion # region General configuration # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns: Sequence[str] = [] master_doc = 'index' # endregion # region Options for HTML output # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # endregion # region Extension configuration # endregion
33.3125
77
0.760475
232
1,599
5.189655
0.568966
0.049834
0.018272
0.024917
0.081395
0
0
0
0
0
0
0.005848
0.144465
1,599
47
78
34.021277
0.874269
0.700438
0
0
0
0
0.274554
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a29601d340f52437bc81f042a9b4102018cde77
19,710
py
Python
generators/generate_pybind11_bindings.py
sweptlaser/pclpy
ab84da7ed264b5bc918af0d858e6d4063275aab9
[ "MIT" ]
null
null
null
generators/generate_pybind11_bindings.py
sweptlaser/pclpy
ab84da7ed264b5bc918af0d858e6d4063275aab9
[ "MIT" ]
null
null
null
generators/generate_pybind11_bindings.py
sweptlaser/pclpy
ab84da7ed264b5bc918af0d858e6d4063275aab9
[ "MIT" ]
null
null
null
import os import platform import shutil import sys from collections import Counter from collections import defaultdict, OrderedDict from os.path import join from typing import List, Dict, Set from CppHeaderParser import CppHeaderParser from CppHeaderParser.CppHeaderParser import CppMethod import generators.dependency_tree from generators.config import common_includes, PCL_BASE, PATH_LOADER, PATH_MODULES, MODULES_TO_BUILD, \ HEADERS_TO_SKIP, ATTRIBUTES_TO_SKIP, CLASSES_TO_IGNORE, METHODS_TO_SKIP, SUBMODULES_TO_SKIP, EXPLICIT_INCLUDES, \ SPECIALIZED_TEMPLATED_TYPES_TO_SKIP from generators.definitions.function import generate_function_definitions, get_methods_defined_outside from generators.definitions.method import split_methods_by_type from generators.definitions.submodule_loader import generate_loader from generators.definitions.templated_class import ClassDefinition from generators.instantiations import Instantiations from generators.point_types_utils import unpack_yaml_point_types from generators.utils import make_header_include_name, sort_headers_by_dependencies, \ generate_main_loader, make_namespace_class, read_header_file def filter_methods_for_parser_errors(methods): return [m for m in methods if not m["name"] in ("void", "bool")] def filter_methods_to_skip(methods): filtered_methods = [] for m in methods: if (m["parent"]["name"], m["name"]) in METHODS_TO_SKIP: continue if "Callback" in m["name"]: single_argument = len(m["parameters"]) == 1 boost_function = single_argument and m["parameters"][0]["type"].startswith("boost::function") if not boost_function: continue filtered_methods.append(m) return filtered_methods def same_parameters(p1: Dict, p2: Dict) -> bool: fields = ["constant", "name", "raw_type", "reference", "static"] return all(p1[f] == p2[f] for f in fields) def same_methods(m1: CppMethod, m2: CppMethod) -> bool: if m1["name"] != m2["name"]: return False # bug in CppHeaderParser # in "void ImageGrabber<PointT>::publish", "void ImageGrabber<PointT>::" is the return type path = m1.get("path", m2.get("path")) path = path[path.rfind(":") + 1:] if not any(path in type_ for type_ in [m1["rtnType"], m2["rtnType"]]): return False # same parameters for p1 in m1["parameters"]: for p2 in m2["parameters"]: if m1["name"] == m2["name"] and same_parameters(p1, p2): break else: return False return len(m1["parameters"]) == len(m2["parameters"]) def private_methods_defined_outside(private_methods: List[CppMethod], methods_declared_outside: List[CppMethod]) -> List[CppMethod]: private_defined_outside = [] for m_private in private_methods: for m_outside in methods_declared_outside: if same_methods(m_private, m_outside): private_defined_outside.append(m_private) break return private_defined_outside def generate_class_definitions(main_classes, module, header_name, path, needs_overloading: List[str], methods_defined_outside: List[CppMethod]) -> str: text = [] a = text.append a(common_includes) a(EXPLICIT_INCLUDES.get((module, header_name), "")) a(make_header_include_name(module, header_name, path)) a("") namespaces = set([c["namespace"] for c in main_classes]) for namespace in namespaces: if not namespace == "pcl": a("using namespace %s;" % namespace) a("\n") for class_ in main_classes: methods = class_["methods"]["public"] methods = filter_methods_for_parser_errors(methods) methods = filter_methods_to_skip(methods) private_and_protected = class_["methods"]["private"] + class_["methods"]["protected"] methods += private_methods_defined_outside(private_and_protected, methods_defined_outside) class_properties = [p for p in class_["properties"]["public"] if not "using" in p["type"] and not "union" in p["type"]] union_properties = [p for nested_class in class_["nested_classes"] for p in nested_class["properties"]["public"] if "union" in nested_class["name"]] class_properties += union_properties class_properties = filter_class_properties(module, header_name, class_["name"], class_properties) constructors, variables, others = split_methods_by_type(methods, class_properties, needs_overloading) if not class_["can_be_instantiated"]: constructors = [] class_def = ClassDefinition(class_, constructors, variables, others, module) a(class_def.to_class_function_definition()) a("") return "\n".join(text) def filter_class_properties(module, header, class_name, properties): key = (module, header, class_name) # ignore properties without a name properties = [p for p in properties if p["name"]] if key in ATTRIBUTES_TO_SKIP: to_ignore = ATTRIBUTES_TO_SKIP[key] filtered_properties = [] for p in properties: if p["name"] in to_ignore: continue filtered_properties.append(p) properties = filtered_properties return properties def get_main_classes(header, module, header_name): # header = read_headers(base_path, header_name, module) main_classes = [c for c in header.classes.values() if c["namespace"] in ("pcl", "pcl::" + module)] filtered_main_classes = [] for class_ in main_classes: specialized_template = class_.get("template") and "<" in class_["name"] if specialized_template: to_skip = any(("<%s>" % type_) in class_["name"] for type_ in SPECIALIZED_TEMPLATED_TYPES_TO_SKIP) if not to_skip: message = "Warning: Template class specialization not implemented for class %s in %s" print(message % (class_["name"], header_name)) elif (module, header_name, class_["name"]) in CLASSES_TO_IGNORE: pass else: filtered_main_classes.append(class_) filtered_main_classes = sorted(filtered_main_classes, key=lambda c: c["name"]) return filtered_main_classes def get_functions(header, module): functions = [f for f in header.functions if f["namespace"] in ("pcl", "pcl::", "pcl::%s" % module, "pcl::%s::" % module)] functions = sorted(functions, key=lambda f: f["name"]) filtered = filter_module_level_functions(functions) return filtered def filter_module_level_functions(functions: List[CppMethod]): filtered = [] for f in functions: keep = True if f.get("returns_const"): keep = False for param in f["parameters"]: for type_ in SPECIALIZED_TEMPLATED_TYPES_TO_SKIP: if type_ in param["type"]: keep = False if keep: filtered.append(f) return filtered def get_variables(header): variables = [v for v in header.variables if v.get("defaultValue") and 'using' != v.get('type')] variables = sorted(variables, key=lambda v: v["name"]) return variables def get_enums(header): enums = [e for e in header.enums if e.get("name")] # skip nameless enums enums = sorted(enums, key=lambda v: v["name"]) return enums def read_header(header_path, skip_macros=None): # I tried to do this in multiple threads but it seems like CppHeaderParser is not thread safe... if skip_macros is None: skip_macros = [] header_file_str = read_header_file(header_path, skip_macros) parser = CppHeaderParser parser.debug = False header = parser.CppHeader(header_file_str, argType="string") return header def clean(): try: os.remove(PATH_LOADER) except FileNotFoundError: pass if os.path.exists(PATH_MODULES): shutil.rmtree(PATH_MODULES) def check_if_needs_overloading(main_classes): needs_overloading = {} classes_by_module = defaultdict(list) for (module, _), class_ in main_classes.items(): classes_by_module[module] += class_ for module, classes in classes_by_module.items(): needs = [] for class_ in classes: count = Counter(m["name"] for methods in class_["methods"].values() for m in methods) for name, count in count.items(): if count >= 2: needs.append(name) needs_overloading[module] = needs return needs_overloading def get_headers(modules=None, skip_modules=None): def listmod(module): found_modules = [] for base, folders, files in os.walk(join(PCL_BASE, module)): if any(base.endswith(m) for m in SUBMODULES_TO_SKIP): continue relative_base = os.path.abspath(base).replace(PCL_BASE, "")[1:] for f in files: if f.endswith(".h"): found_modules.append([f, join(relative_base, f)]) return found_modules if modules is None: modules = MODULES_TO_BUILD if skip_modules is not None: modules = [m for m in modules if m not in skip_modules] headers_to_generate = [(module, header_name, path) for module in modules for header_name, path in listmod(module)] base_headers = [("", f, f) for f in os.listdir(PCL_BASE) if f.endswith(".h")] headers_to_generate += base_headers headers_to_generate_temp = [] for module, header_name, path in headers_to_generate: if (module, header_name) in HEADERS_TO_SKIP: continue headers_to_generate_temp.append(tuple([module, header_name, path])) return headers_to_generate_temp def get_pure_virtual_methods(class_: CppHeaderParser.CppClass) -> Set[str]: access = "private protected public".split() return set([m["name"] for a in access for m in class_["methods"][a] if m["pure_virtual"]]) def get_all_class_methods_not_pure_virtual(class_: CppHeaderParser.CppClass) -> Set[str]: access = "private protected public".split() return set([m["name"] for a in access for m in class_["methods"][a] if not m["pure_virtual"]]) def flag_instantiatable_class(dependency_tree, main_classes): """determine if the class can be instantiated""" main_classes_by_name_namespace = {make_namespace_class(c["namespace"], c["name"]): c for classes in main_classes.values() for c in classes} for module, header_name in main_classes: for class_ in main_classes[(module, header_name)]: can_be_instantiated = True if class_["abstract"]: can_be_instantiated = False else: # check if any pure virtual method is not implemented all_implemented_inherited_methods = get_all_class_methods_not_pure_virtual(class_) namespace_class = make_namespace_class(class_["namespace"], class_["name"]) for base_name_nsp in dependency_tree.breadth_first_iterator(namespace_class): base_class = main_classes_by_name_namespace.get(base_name_nsp) if base_class: base_class_methods = get_all_class_methods_not_pure_virtual(base_class) all_implemented_inherited_methods.update(base_class_methods) for base_name_nsp in dependency_tree.breadth_first_iterator(namespace_class): base_class = main_classes_by_name_namespace.get(base_name_nsp) if base_class and base_class["abstract"]: base_pure_virtual_methods = get_pure_virtual_methods(base_class) if base_pure_virtual_methods - all_implemented_inherited_methods: can_be_instantiated = False class_["can_be_instantiated"] = can_be_instantiated def load_yaml_point_types(not_every_point_type): classes_point_types = unpack_yaml_point_types("point_types_generated.yml", not_every_point_type) extra_point_types = unpack_yaml_point_types("point_types_extra.yml") for k, v in extra_point_types.items(): if k in classes_point_types: classes_point_types[k].append(v) else: classes_point_types[k] = v return classes_point_types def make_module_dirs(modules): for module in modules: module_dir = join(PATH_MODULES, module) if not os.path.exists(module_dir): os.makedirs(module_dir) def is_file_different(path, text): v = open(path).read() if v != text: print("File is different: %s" % os.path.split(path)[1]) return True # print("File is the same: %s" % os.path.split(path)[1]) return False def write_if_different(files_to_write, delete_others): written = [] for base, folder, files in os.walk(PATH_MODULES): for f in files: path = join(base, f) if path in files_to_write: if is_file_different(path, files_to_write[path]): open(path, "w").write(files_to_write[path]) written.append(path) elif delete_others: os.remove(path) print("Deleted: " + path) # write new files for path, text in files_to_write.items(): if path not in written: open(path, "w").write(files_to_write[path]) def delete_other_dirs(modules): for f in os.listdir(PATH_MODULES): folder = join(PATH_MODULES, f) if f not in modules and os.path.isdir(folder): shutil.rmtree(folder, ignore_errors=True) def write_stuff_if_needed(generated_headers: OrderedDict, delete_others=True): modules = set(module for module, _ in generated_headers.keys()) make_module_dirs(modules) # hpp files_to_write = {} for (module, header_name), text in generated_headers.items(): if text: output_path = join(PATH_MODULES, module, header_name + "pp") files_to_write[output_path] = text # loaders loader_modules = defaultdict(list) for (module, header_name), text in generated_headers.items(): if text: loader_modules[module or "base"].append(header_name) for module, headers in loader_modules.items(): path_loader = join(PATH_MODULES, "_%s_loader.cpp" % module) files_to_write[path_loader] = generate_loader(module, headers) files_to_write[PATH_LOADER] = generate_main_loader(loader_modules) write_if_different(files_to_write, delete_others) if delete_others: delete_other_dirs(modules) def generate(headers_to_generate, skip_macros, not_every_point_type=False) -> OrderedDict: """ :return: OrderedDict """ main_classes, module_functions, module_variables, module_enums = {}, {}, {}, {} for module, header_name, path in headers_to_generate[:]: header_full_path = join(PCL_BASE, path) if path else join(PCL_BASE, module, header_name) header = read_header(header_full_path, skip_macros) main_classes[(module, header_name)] = get_main_classes(header, module, header_name) module_functions[(module, header_name)] = get_functions(header, module) module_variables[(module, header_name)] = get_variables(header) module_enums[(module, header_name)] = get_enums(header) classes = [c for module, header, path in headers_to_generate for c in main_classes[(module, header)]] dependency_tree = generators.dependency_tree.DependencyTree(classes) loaded_point_types = load_yaml_point_types(not_every_point_type) classes_point_types: OrderedDict = dependency_tree.get_point_types_with_dependencies(loaded_point_types) classes_sorted_base_first = list(dependency_tree.leaf_iterator()) def index_for_class(class_): return classes_sorted_base_first.index(make_namespace_class(class_["namespace"], class_["name"])) # sort classes inside modules based on inheritance for module, header in main_classes: main_classes[(module, header)] = list(sorted(main_classes[(module, header)], key=index_for_class)) headers_to_generate = sort_headers_by_dependencies(headers_to_generate, skip_macros=skip_macros) methods_need_overloading = check_if_needs_overloading(main_classes) flag_instantiatable_class(dependency_tree, main_classes) def generate_header(module, header, path, keep_if_no_instantiation) -> str: header_functions = module_functions[(module, header)] header_classes = main_classes[(module, header)] methods_defined_outside = get_methods_defined_outside(header_functions) class_definitions = generate_class_definitions(header_classes, module, header, path, methods_need_overloading.get(module), methods_defined_outside) function_definitions = generate_function_definitions(header_functions, module, header, not_every_point_type=not_every_point_type) instantiations = Instantiations(header_classes, module, header, classes_point_types, module_variables[(module, header)], module_enums[(module, header)], ) instantiation_function = instantiations.generate_instantiation_function(has_functions=bool(header_functions)) something_instantiated = len(instantiation_function.split("\n")) > 2 text = [] if something_instantiated or keep_if_no_instantiation: text = [class_definitions, function_definitions, instantiation_function] return "\n".join(text) generated_headers = OrderedDict() for module, header, path in headers_to_generate: generated_headers[(module, header)] = generate_header(module, header, path, keep_if_no_instantiation=False) return generated_headers def main(): import time t = time.time() windows = platform.system() == "Windows" skip_macros = [] skip_modules = [] if not windows: skip_macros = ["_MSC_VER"] #skip_modules = ["visualization"] skip_modules = [] all_headers = get_headers(skip_modules=skip_modules) not_every_point_type = "--not-every-point-type" in sys.argv generated_headers = generate(all_headers, skip_macros, not_every_point_type) write_stuff_if_needed(generated_headers, delete_others=True) print("generated in %.2f s" % (time.time() - t,)) if __name__ == '__main__': main()
40.22449
117
0.640589
2,351
19,710
5.065079
0.118673
0.040309
0.02956
0.012849
0.251512
0.183994
0.153594
0.1195
0.080114
0.05744
0
0.002155
0.270167
19,710
489
118
40.306748
0.825652
0.030999
0
0.157182
0
0
0.049725
0.003567
0
0
0
0
0
1
0.081301
false
0.00542
0.054201
0.00542
0.208672
0.01084
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2b2a493d7950b9f9a43469d730e06de8f5d85d
2,372
py
Python
tests/conftest.py
aviramha/aiologstash2
08c5127bf77e3b66ddcb2e8acff82368dbc58af7
[ "MIT" ]
1
2022-02-01T10:10:05.000Z
2022-02-01T10:10:05.000Z
tests/conftest.py
aviramha/aiologstash2
08c5127bf77e3b66ddcb2e8acff82368dbc58af7
[ "MIT" ]
null
null
null
tests/conftest.py
aviramha/aiologstash2
08c5127bf77e3b66ddcb2e8acff82368dbc58af7
[ "MIT" ]
null
null
null
import asyncio import logging from json import loads import pytest from aiologstash2 import create_tcp_handler logging.getLogger().setLevel(logging.DEBUG) class FakeTcpServer: def __init__(self): self.data = bytearray() self.server = None self.futs = set() async def start(self): self.server = await asyncio.start_server(self.on_connect, host="127.0.0.1") @property def port(self): return self.server.sockets[0].getsockname()[1] @property def jsons(self): s = self.data.decode("utf8") return [loads(i) for i in s.split("\n") if i] async def close(self): if self.server is None: return self.server.close() await self.server.wait_closed() self.server = None async def on_connect(self, reader, writer): while True: data = await reader.read(1024) if not data: break self.data.extend(data) for fut in self.futs: if not fut.done(): fut.set_result(None) async def wait(self): fut = asyncio.get_event_loop().create_future() self.futs.add(fut) await fut self.futs.remove(fut) @pytest.fixture async def make_tcp_server(): servers = [] async def go(): server = FakeTcpServer() await server.start() servers.append(server) return server yield go async def finalize(): for server in servers: await server.close() await finalize() @pytest.fixture async def make_tcp_handler(make_tcp_server): handlers = [] async def go(*args, level=logging.DEBUG, **kwargs): server = await make_tcp_server() handler = await create_tcp_handler("127.0.0.1", server.port, **kwargs) handlers.append(handler) return handler, server yield go async def finalize(): for handler in handlers: handler.close() await handler.wait_closed() await finalize() @pytest.fixture async def setup_logger(make_tcp_handler): async def go(*args, **kwargs): handler, server = await make_tcp_handler(*args, **kwargs) logger = logging.getLogger("aiologstash_test") logger.addHandler(handler) return logger, handler, server yield go
23.029126
83
0.605818
291
2,372
4.831615
0.298969
0.068279
0.038407
0.044808
0.118777
0.118777
0.045519
0
0
0
0
0.01194
0.293845
2,372
102
84
23.254902
0.827463
0
0
0.186667
0
0
0.016863
0
0
0
0
0
0
1
0.04
false
0
0.066667
0.013333
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2b9f3633d27fae8143c30667678e54429c7aa8
19,973
py
Python
tcex/playbooks/playbooks_base.py
RichieB2B/tcex
eba20a67d4b8e3596c895b7c45325716267d7c85
[ "Apache-2.0" ]
null
null
null
tcex/playbooks/playbooks_base.py
RichieB2B/tcex
eba20a67d4b8e3596c895b7c45325716267d7c85
[ "Apache-2.0" ]
null
null
null
tcex/playbooks/playbooks_base.py
RichieB2B/tcex
eba20a67d4b8e3596c895b7c45325716267d7c85
[ "Apache-2.0" ]
null
null
null
"""TcEx Framework Playbook module""" # standard library import base64 import json import re from collections import OrderedDict from collections.abc import Iterable class PlaybooksBase: """TcEx Playbook Module Base Class Args: tcex (TcEx): Instance of TcEx class. context (str): The Redis context (hash). output_variables (list): The requested output variables. """ def __init__(self, tcex, context, output_variables): """Initialize the Class properties.""" self.tcex = tcex self._context = context self._output_variables = output_variables or [] # properties self._output_variables_by_name = None self._output_variables_by_type = None self.log = tcex.log # match full variable self._variable_match = re.compile(fr'^{self._variable_pattern}$') # capture variable parts (exactly a variable) self._variable_parse = re.compile(self._variable_pattern) # match embedded variables without quotes (#App:7979:variable_name!StringArray) self._vars_keyvalue_embedded = re.compile(fr'(?:\"\:\s?)[^\"]?{self._variable_pattern}') def _coerce_string_value(self, value): """Return a string value from an bool or int.""" # coerce bool before int as python says a bool is an int if isinstance(value, bool): # coerce bool to str type self.log.warning(f'Coercing bool value ({value}) to a string ("{str(value).lower()}").') value = str(value).lower() # coerce int to str type if isinstance(value, (float, int)): self.log.warning(f'Coercing float/int value ({value}) to a string ("{str(value)}").') value = str(value) return value def _create(self, key, value, validate=True): """Create the value in Redis if applicable.""" if key is None or value is None: self.log.warning('The key or value field is None.') return None # get variable type from variable value variable_type = self.variable_type(key) if variable_type == 'Binary': # if not isinstance(value, bytes): # value = value.encode('utf-8') if validate and not isinstance(value, bytes): raise RuntimeError('Invalid data provided for Binary.') value = base64.b64encode(value).decode('utf-8') elif variable_type == 'KeyValue': if validate and (not isinstance(value, dict) or not self._is_key_value(value)): raise RuntimeError('Invalid data provided for KeyValue.') elif variable_type == 'String': # coerce string values value = self._coerce_string_value(value) if validate and not isinstance(value, str): raise RuntimeError('Invalid data provided for String.') elif variable_type == 'TCEntity': if validate and (not isinstance(value, dict) or not self._is_tc_entity(value)): raise RuntimeError('Invalid data provided for TcEntity.') # self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}') try: value = json.dumps(value) except ValueError as e: # pragma: no cover raise RuntimeError(f'Failed to serialize value ({e}).') try: return self.tcex.key_value_store.create(self._context, key.strip(), value) except RuntimeError as e: self.log.error(e) return None def _create_array(self, key, value, validate=True): """Create the value in Redis if applicable.""" if key is None or value is None: self.log.warning('The key or value field is None.') return None # get variable type from variable value variable_type = self.variable_type(key) # Enhanced entity array is the wild-wild west, don't validate it if variable_type != 'TCEnhancedEntityArray': if validate and (not isinstance(value, Iterable) or isinstance(value, (str, dict))): raise RuntimeError(f'Invalid data provided for {variable_type}.') value = [ *value ] # spread the value so that we know it's a list (as opposed to an iterable) if variable_type == 'BinaryArray': value_encoded = [] for v in value: if v is not None: if validate and not isinstance(v, bytes): raise RuntimeError('Invalid data provided for Binary.') # if not isinstance(v, bytes): # v = v.encode('utf-8') v = base64.b64encode(v).decode('utf-8') value_encoded.append(v) value = value_encoded elif variable_type == 'KeyValueArray': if validate and not self._is_key_value_array(value): raise RuntimeError('Invalid data provided for KeyValueArray.') elif variable_type == 'StringArray': value_coerced = [] for v in value: # coerce string values v = self._coerce_string_value(v) if validate and not isinstance(v, (type(None), str)): raise RuntimeError('Invalid data provided for StringArray.') value_coerced.append(v) value = value_coerced elif variable_type == 'TCEntityArray': if validate and not self._is_tc_entity_array(value): raise RuntimeError('Invalid data provided for TcEntityArray.') # self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}') try: value = json.dumps(value) except ValueError as e: # pragma: no cover raise RuntimeError(f'Failed to serialize value ({e}).') try: return self.tcex.key_value_store.create(self._context, key.strip(), value) except RuntimeError as e: self.log.error(e) return None @staticmethod def _decode_binary(data): """Return decoded bytes data handling data written by java apps.""" try: data = data.decode('utf-8') except UnicodeDecodeError: # pragma: no cover # for data written an upstream java App data = data.decode('latin-1') return data @staticmethod def _is_key_value(data): """Return True if provided data has proper structure for Key Value.""" if data is None: return False return all(x in data for x in ['key', 'value']) def _is_key_value_array(self, data): """Return True if provided data has proper structure for Key Value Array.""" for d in data: if not self._is_key_value(d): return False return True @staticmethod def _is_tc_entity(data): """Return True if provided data has proper structure for TC Entity.""" if data is None: return False return all(x in data for x in ['id', 'value', 'type']) def _is_tc_entity_array(self, data): """Return True if provided data has proper structure for TC Entity Array.""" for d in data: if not self._is_tc_entity(d): return False return True @staticmethod def _load_value(value): """Return the loaded JSON value or raise an error. Args: value (str): The data from key/value store. Raises: RuntimeError: Raise error when data can't be loaded as JSON data. Returns: any: The de-serialized value from the key/value store. """ try: return json.loads(value, object_pairs_hook=OrderedDict) except ValueError as e: # pragma: no cover raise RuntimeError(f'Failed to JSON load data "{value}" ({e}).') def _parse_output_variables(self): """Parse the output variables provided to Playbook Class. **Example Variable Format**:: ['#App:1234:status!String', '#App:1234:status_code!String'] """ self._output_variables_by_name = {} self._output_variables_by_type = {} for ov in self._output_variables: # parse the variable to get individual parts parsed_variable = self.parse_variable(ov) variable_name = parsed_variable.get('name') variable_type = parsed_variable.get('type') # store the variables in dict by name (e.g. "status_code") self._output_variables_by_name[variable_name] = {'variable': ov} # store the variables in dict by name-type (e.g. "status_code-String") self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov} def _read(self, key, embedded=True, b64decode=True, decode=False): """Create the value in Redis if applicable.""" if key is None: self.log.warning('The key is None.') return None # get variable type from variable value variable_type = self.variable_type(key) try: value = self.tcex.key_value_store.read(self._context, key.strip()) except RuntimeError as e: self.log.error(e) return None if value is None: return value if variable_type == 'Binary': value = self._load_value(value) if b64decode: value = base64.b64decode(value) if decode: value = self._decode_binary(value) elif variable_type == 'KeyValue': # embedded variable can be unquoted, which breaks JSON. value = self._wrap_embedded_keyvalue(value) if embedded: value = self._read_embedded(value) value = self._load_value(value) elif variable_type == 'String': if embedded: value = self._read_embedded(value) # coerce string values value = self._coerce_string_value(self._load_value(value)) elif variable_type == 'TCEntity': value = self._load_value(value) return value def _read_array(self, key, embedded=True, b64decode=True, decode=False): """Create the value in Redis if applicable.""" if key is None: # pragma: no cover self.log.warning('The null value for key was provided.') return None # get variable type from variable value variable_type = self.variable_type(key) try: value = self.tcex.key_value_store.read(self._context, key.strip()) except RuntimeError as e: self.log.error(e) return None if value is None: return value if variable_type == 'BinaryArray': value = json.loads(value, object_pairs_hook=OrderedDict) values = [] for v in value: if v is not None and b64decode: v = base64.b64decode(v) if decode: v = self._decode_binary(v) values.append(v) value = values elif variable_type == 'KeyValueArray': # embedded variable can be unquoted, which breaks JSON. value = self._wrap_embedded_keyvalue(value) if embedded: value = self._read_embedded(value) try: value = json.loads(value, object_pairs_hook=OrderedDict) except ValueError as e: # pragma: no cover raise RuntimeError(f'Failed loading JSON data ({value}). Error: ({e})') elif variable_type == 'StringArray': if embedded: value = self._read_embedded(value) # convert int to str value_coerced = [] for v in self._load_value(value): # coerce string values value_coerced.append(self._coerce_string_value(v)) value = value_coerced elif variable_type in ['TCEntityArray', 'TCEnhancedEntity', 'TCEnhancedEntityArray']: value = self._load_value(value) # self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}') return value def _read_embedded(self, value): """Read method for "embedded" variables. .. Note:: The ``read()`` method will automatically determine if the input is a variable or needs to be searched for embedded variables. Embedded variable rules: * Only user input can have embedded variables. * Only String and KeyValueArray variables can have embedded variables. * Variables can only be embedded one level deep. This method will automatically covert variables embedded in a string with value retrieved from DB. If there are no keys/variables the raw string will be returned. Examples:: DB Values #App:7979:variable_name!String: "embedded \\"variable\\"" #App:7979:two!String: "two" #App:7979:variable_name!StringArray: ["one", "two", "three"] Examples 1: Input: "This input has a embedded #App:7979:variable_name!String" Examples 2: Input: ["one", #App:7979:two!String, "three"] Examples 3: Input: [{ "key": "embedded string", "value": "This input has a embedded #App:7979:variable_name!String" }, { "key": "string array", "value": #App:7979:variable_name!StringArray }, { "key": "string", "value": #App:7979:variable_name!String }] Args: value (str): The value to parsed and updated from the DB. Returns: (str): Results retrieved from DB """ if value is None: # pragma: no cover return value for variable in (v.group(0) for v in re.finditer(self._variable_parse, str(value))): v = self.read(variable) self.log.trace(f'embedded variable: {variable}, value: {v}') if isinstance(v, (dict, list)): v = json.dumps(v) # for KeyValueArray with nested dict/list type replace the # quoted value to ensure the resulting data is loadable JSON value = re.sub(f'"{variable}"', v, value) if v is not None: # only replace variable if a non-null value is returned from kv store # APP-1030 need to revisit this to handle variable references in kv/kvarrays that # are None. Would like to be able to say if value is just the variable reference, # sub None value, else insert '' in string. That would require a kv-specific # version of this method that gets the entire list/dict instead of just the string. value = re.sub(variable, v, value) return value @property def _variable_pattern(self): """Regex pattern to match and parse a playbook variable.""" variable_pattern = r'#([A-Za-z]+)' # match literal (#App,#Trigger) at beginning of String variable_pattern += r':([\d]+)' # app id (:7979) variable_pattern += r':([A-Za-z0-9_\.\-\[\]]+)' # variable name (:variable_name) variable_pattern += r'!(StringArray|BinaryArray|KeyValueArray' # variable type (array) variable_pattern += r'|TCEntityArray|TCEnhancedEntityArray' # variable type (array) variable_pattern += r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity' # variable type variable_pattern += r'|(?:(?!String)(?!Binary)(?!KeyValue)' # non matching for custom variable_pattern += r'(?!TCEntity)(?!TCEnhancedEntity)' # non matching for custom variable_pattern += r'[A-Za-z0-9_-]+))' # variable type (custom) return variable_pattern @property def _variable_array_types(self): """Return list of standard playbook array variable types.""" return [ 'BinaryArray', 'KeyValueArray', 'StringArray', 'TCEntityArray', 'TCEnhancedEntityArray', ] @property def _variable_single_types(self): """Return list of standard playbook single variable types.""" return [ 'Binary', 'KeyValue', 'String', 'TCEntity', 'TCEnhancedEntity', ] @property def _variable_types(self): """Return list of standard playbook variable typesd.""" return self._variable_single_types + self._variable_array_types def _wrap_embedded_keyvalue(self, data): """Wrap keyvalue embedded variable in double quotes. Args: data (str): The data with embedded variables. Returns: (str): Results retrieved from DB """ # TODO: need to verify if core still sends improper JSON for KeyValueArrays if data is not None: # pragma: no cover variables = [] for v in re.finditer(self._vars_keyvalue_embedded, data): variables.append(v.group(0)) for var in set(variables): # recursion over set to handle duplicates # pull (#App:1441:embedded_string!String) from (": #App:1441:embedded_string!String) variable_string = re.search(self._variable_parse, var).group(0) # reformat to replace the correct instance only, handling the case where a variable # is embedded multiple times in the same key value array. data = data.replace(var, f'": "{variable_string}"') return data def create_raw(self, key, value): """Create method of CRUD operation for raw data. ..important:: Raw data can only be a byte, str or int. Other data structures (dict, list, etc) must be serialized. Args: key (str): The variable to write to the DB. value (bytes|int|string): The data to write to the DB. Returns: (str): Result of DB write. """ data = None if key is not None and value is not None: try: data = self.tcex.key_value_store.create(self._context, key.strip(), value) except RuntimeError as e: self.log.error(e) else: self.log.warning('The key or value field was None.') return data def read_raw(self, key): """Read method of CRUD operation for raw data. ..important:: Bytes input will be returned a as string as there is no way to determine data from redis originated as bytes or string. Args: key (str): The variable to read from the DB. Returns: (str): Results retrieved from DB. """ value = None if key is not None: value = self.tcex.key_value_store.read(self._context, key.strip()) else: self.log.warning('The key field was None.') return value def parse_variable(self, variable): # pragma: no cover """Set placeholder for child method.""" raise NotImplementedError('Implemented in child class') def read(self, key, array=False, embedded=True): # pragma: no cover """Set placeholder for child method.""" raise NotImplementedError('Implemented in child class') def variable_type(self, variable): # pragma: no cover """Set placeholder for child method.""" raise NotImplementedError('Implemented in child class')
38.857977
100
0.586191
2,357
19,973
4.852355
0.134493
0.038821
0.016788
0.012591
0.496546
0.436216
0.387427
0.321063
0.270263
0.254525
0
0.007524
0.321284
19,973
513
101
38.933723
0.836161
0.313373
0
0.46831
0
0
0.129109
0.031023
0
0
0
0.001949
0
1
0.084507
false
0
0.017606
0
0.225352
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2c1d2751e3ec16040b8d54e21b7960cfed3c22
31,780
py
Python
DeepLearningExamples/TensorFlow/LanguageModeling/BERT/run_classifier.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
DeepLearningExamples/TensorFlow/LanguageModeling/BERT/run_classifier.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
DeepLearningExamples/TensorFlow/LanguageModeling/BERT/run_classifier.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
# coding=utf-8 # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os import modeling import optimization import tokenization import tensorflow as tf import horovod.tensorflow as hvd import time from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags from utils.gpu_affinity import set_affinity import utils.dllogger_class from dllogger import Verbosity from utils.create_glue_data import * import numpy as np import tf_metrics flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "dllog_path", "/results/bert_dllog.json", "filename where dllogger writes to") flags.DEFINE_string( "optimizer_type", "lamb", "Optimizer type : adam or lamb") flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("display_loss_steps", 10, "How often to print loss from estimator") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("num_accumulation_steps", 1, "Number of accumulation steps before gradient update" "Global batch size = num_accumulation_steps * train_batch_size") flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.") flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.") flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs") flags.DEFINE_bool( "verbose_logging", False, "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training, drop_remainder, hvd=None): """Creates an `input_fn` closure to be passed to Estimator.""" name_to_features = { "input_ids": tf.io.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.io.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.io.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(): """The actual input function.""" # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: if hvd is not None: d = d.shard(hvd.size(), hvd.rank()) d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings, compute_type=tf.float32) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias, name='cls_logits') probabilities = tf.nn.softmax(logits, axis=-1, name='cls_probabilities') log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss') loss = tf.reduce_mean(per_example_loss, name='cls_loss') return (loss, per_example_loss, logits, probabilities) def get_frozen_tftrt_model(bert_config, shape, num_labels, use_one_hot_embeddings, init_checkpoint): tf_config = tf.compat.v1.ConfigProto() tf_config.gpu_options.allow_growth = True output_node_names = ['loss/cls_loss', 'loss/cls_per_example_loss', 'loss/cls_logits', 'loss/cls_probabilities'] with tf.Session(config=tf_config) as tf_sess: input_ids = tf.placeholder(tf.int32, shape, 'input_ids') input_mask = tf.placeholder(tf.int32, shape, 'input_mask') segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids') label_ids = tf.placeholder(tf.int32, (None), 'label_ids') create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf_sess.run(tf.global_variables_initializer()) print("LOADED!") tf.compat.v1.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" else: init_string = ", *NOTTTTTTTTTTTTTTTTTTTTT" tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess, tf_sess.graph.as_graph_def(), output_node_names) num_nodes = len(frozen_graph.node) print('Converting graph using TensorFlow-TensorRT...') from tensorflow.python.compiler.tensorrt import trt_convert as trt converter = trt.TrtGraphConverter( input_graph_def=frozen_graph, nodes_blacklist=output_node_names, max_workspace_size_bytes=(4096 << 20) - 1000, precision_mode = "FP16" if FLAGS.amp else "FP32", minimum_segment_size=4, is_dynamic_op=True, maximum_cached_engines=1000 ) frozen_graph = converter.convert() print('Total node count before and after TF-TRT conversion:', num_nodes, '->', len(frozen_graph.node)) print('TRT node count:', len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp'])) with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f: f.write(frozen_graph.SerializeToString()) return frozen_graph def model_fn_builder(task_name, bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_one_hot_embeddings, hvd=None): """Returns `model_fn` closure for Estimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for Estimator.""" def metric_fn(per_example_loss, label_ids, logits): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) if task_name == "cola": FN, FN_op = tf.metrics.false_negatives(labels=label_ids, predictions=predictions) FP, FP_op = tf.metrics.false_positives(labels=label_ids, predictions=predictions) TP, TP_op = tf.metrics.true_positives(labels=label_ids, predictions=predictions) TN, TN_op = tf.metrics.true_negatives(labels=label_ids, predictions=predictions) MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5 MCC_op = tf.group(FN_op, TN_op, TP_op, FP_op, tf.identity(MCC, name="MCC")) return {"MCC": (MCC, MCC_op)} elif task_name == "mrpc": accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions) loss = tf.metrics.mean(values=per_example_loss) f1 = tf_metrics.f1(labels=label_ids, predictions=predictions, num_classes=2, pos_indices=[1]) return { "eval_accuracy": accuracy, "eval_f1": f1, "eval_loss": loss, } else: accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions) loss = tf.metrics.mean(values=per_example_loss) return { "eval_accuracy": accuracy, "eval_loss": loss, } tf.compat.v1.logging.info("*** Features ***") tf.compat.v1.logging.info("*** Features ***") for name in sorted(features.keys()): tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) if not is_training and FLAGS.use_trt: trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, num_labels, use_one_hot_embeddings, init_checkpoint) (total_loss, per_example_loss, logits, probabilities) = tf.import_graph_def(trt_graph, input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids, 'label_ids':label_ids}, return_elements=['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0', 'loss/cls_probabilities:0'], name='') if mode == tf.estimator.ModeKeys.PREDICT: predictions = {"probabilities": probabilities} output_spec = tf.estimator.EstimatorSpec( mode=mode, predictions=predictions) elif mode == tf.estimator.ModeKeys.EVAL: eval_metric_ops = metric_fn(per_example_loss, label_ids, logits) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops) return output_spec (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} if init_checkpoint and (hvd is None or hvd.rank() == 0): (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) if FLAGS.verbose_logging: tf.compat.v1.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: dummy_op = tf.no_op() # Need to call mixed precision graph rewrite if fp16 to enable graph rewrite if FLAGS.amp: loss_scaler = tf.train.experimental.FixedLossScale(1) dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite( optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler) eval_metric_ops = metric_fn(per_example_loss, label_ids, logits) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops) else: dummy_op = tf.no_op() # Need to call mixed precision graph rewrite if fp16 to enable graph rewrite if FLAGS.amp: dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite( optimization.LAMBOptimizer(learning_rate=0.0)) output_spec = tf.estimator.EstimatorSpec( mode=mode, predictions=probabilities) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, batch_size, seq_length, is_training, drop_remainder, hvd=None): """Creates an `input_fn` closure to be passed to Estimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(): """The actual input function.""" num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: if hvd is not None: d = d.shard(hvd.size(), hvd.rank()) d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn def main(_): setup_xla_flags() tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path) if FLAGS.horovod: hvd.init() processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, } if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.io.gfile.makedirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) master_process = True training_hooks = [] global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps hvd_rank = 0 config = tf.compat.v1.ConfigProto() if FLAGS.horovod: tf.compat.v1.logging.info("Multi-GPU training with TF Horovod") tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank()) global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size() master_process = (hvd.rank() == 0) hvd_rank = hvd.rank() config.gpu_options.visible_device_list = str(hvd.local_rank()) set_affinity(hvd.local_rank()) if hvd.size() > 1: training_hooks.append(hvd.BroadcastGlobalVariablesHook(0)) if FLAGS.use_xla: config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1 if FLAGS.amp: tf.enable_resource_variables() run_config = tf.estimator.RunConfig( model_dir=FLAGS.output_dir if master_process else None, session_config=config, save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None, save_summary_steps=FLAGS.save_checkpoints_steps if master_process else None, log_step_count_steps=FLAGS.display_loss_steps, keep_checkpoint_max=1) if master_process: tf.compat.v1.logging.info("***** Configuaration *****") for key in FLAGS.__flags.keys(): tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key))) tf.compat.v1.logging.info("**************************") train_examples = None num_train_steps = None num_warmup_steps = None training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps, num_steps_ignore_xla=25)) if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / global_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) start_index = 0 end_index = len(train_examples) tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")] if FLAGS.horovod: tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())] num_examples_per_rank = len(train_examples) // hvd.size() remainder = len(train_examples) % hvd.size() if hvd.rank() < remainder: start_index = hvd.rank() * (num_examples_per_rank+1) end_index = start_index + num_examples_per_rank + 1 else: start_index = hvd.rank() * num_examples_per_rank + remainder end_index = start_index + (num_examples_per_rank) model_fn = model_fn_builder( task_name=task_name, bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(), num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_one_hot_embeddings=False, hvd=None if not FLAGS.horovod else hvd) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config) if FLAGS.do_train: file_based_convert_examples_to_features( train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank]) tf.compat.v1.logging.info("***** Running training *****") tf.compat.v1.logging.info(" Num examples = %d", len(train_examples)) tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.compat.v1.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=tmp_filenames, batch_size=FLAGS.train_batch_size, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True, hvd=None if not FLAGS.horovod else hvd) train_start_time = time.time() estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks) train_time_elapsed = time.time() - train_start_time train_time_wo_overhead = training_hooks[-1].total_time avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed ss_sentences_per_second = (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead if master_process: tf.compat.v1.logging.info("-----------------------------") tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed, num_train_steps * global_batch_size) tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead, (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size) tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second) tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second) tf.compat.v1.logging.info("-----------------------------") if FLAGS.do_eval and master_process: eval_examples = processor.get_dev_examples(FLAGS.data_dir) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) tf.compat.v1.logging.info("***** Running evaluation *****") tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples)) tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_drop_remainder = False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, batch_size=FLAGS.eval_batch_size, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)] eval_start_time = time.time() result = estimator.evaluate(input_fn=eval_input_fn, hooks=eval_hooks) eval_time_elapsed = time.time() - eval_start_time time_list = eval_hooks[-1].time_list time_list.sort() # Removing outliers (init/warmup) in throughput computation. eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)]) num_sentences = (int(len(time_list) * 0.8)) * FLAGS.eval_batch_size avg = np.mean(time_list) cf_50 = max(time_list[:int(len(time_list) * 0.50)]) cf_90 = max(time_list[:int(len(time_list) * 0.90)]) cf_95 = max(time_list[:int(len(time_list) * 0.95)]) cf_99 = max(time_list[:int(len(time_list) * 0.99)]) cf_100 = max(time_list[:int(len(time_list) * 1)]) ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead tf.compat.v1.logging.info("-----------------------------") tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed, eval_hooks[-1].count * FLAGS.eval_batch_size) tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead, num_sentences) tf.compat.v1.logging.info("Summary Inference Statistics on EVAL set") tf.compat.v1.logging.info("Batch size = %d", FLAGS.eval_batch_size) tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length) tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32") tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000) tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000) tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second) dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT) tf.compat.v1.logging.info("-----------------------------") output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.io.gfile.GFile(output_eval_file, "w") as writer: tf.compat.v1.logging.info("***** Eval results *****") for key in sorted(result.keys()): dllogging.logger.log(step=(), data={key: float(result[key])}, verbosity=Verbosity.DEFAULT) tf.compat.v1.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict and master_process: predict_examples = processor.get_test_examples(FLAGS.data_dir) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.compat.v1.logging.info("***** Running prediction*****") tf.compat.v1.logging.info(" Num examples = %d", len(predict_examples)) tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, batch_size=FLAGS.predict_batch_size, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) predict_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)] predict_start_time = time.time() output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with tf.io.gfile.GFile(output_predict_file, "w") as writer: tf.compat.v1.logging.info("***** Predict results *****") for prediction in estimator.predict(input_fn=predict_input_fn, hooks=predict_hooks, yield_single_examples=False): output_line = "\t".join( str(class_probability) for class_probability in prediction) + "\n" writer.write(output_line) predict_time_elapsed = time.time() - predict_start_time time_list = predict_hooks[-1].time_list time_list.sort() # Removing outliers (init/warmup) in throughput computation. predict_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)]) num_sentences = (int(len(time_list) * 0.8)) * FLAGS.predict_batch_size avg = np.mean(time_list) cf_50 = max(time_list[:int(len(time_list) * 0.50)]) cf_90 = max(time_list[:int(len(time_list) * 0.90)]) cf_95 = max(time_list[:int(len(time_list) * 0.95)]) cf_99 = max(time_list[:int(len(time_list) * 0.99)]) cf_100 = max(time_list[:int(len(time_list) * 1)]) ss_sentences_per_second = num_sentences * 1.0 / predict_time_wo_overhead tf.compat.v1.logging.info("-----------------------------") tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", predict_time_elapsed, predict_hooks[-1].count * FLAGS.predict_batch_size) tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", predict_time_wo_overhead, num_sentences) tf.compat.v1.logging.info("Summary Inference Statistics on TEST SET") tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size) tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length) tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32") tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000) tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000) tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second) dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT) tf.compat.v1.logging.info("-----------------------------") if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.compat.v1.app.run()
42.204515
136
0.685809
4,420
31,780
4.673529
0.13371
0.025948
0.032435
0.051847
0.480225
0.441884
0.397202
0.367236
0.314034
0.276855
0
0.017553
0.196916
31,780
752
137
42.260638
0.791827
0.064317
0
0.274956
0
0
0.17041
0.014636
0
0
0
0
0
1
0.019264
false
0
0.036778
0
0.078809
0.012259
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2c56df110778d23c3bb4671a208f95d1915011
69,038
py
Python
FusionIIIT/applications/academic_information/views.py
29rj/Fusion
bc2941a67532e183adeb0bc4042df0b182b9e3aa
[ "bzip2-1.0.6" ]
29
2019-02-20T15:35:33.000Z
2022-03-22T11:10:57.000Z
FusionIIIT/applications/academic_information/views.py
29rj/Fusion
bc2941a67532e183adeb0bc4042df0b182b9e3aa
[ "bzip2-1.0.6" ]
409
2019-01-17T19:30:51.000Z
2022-03-31T16:28:45.000Z
FusionIIIT/applications/academic_information/views.py
29rj/Fusion
bc2941a67532e183adeb0bc4042df0b182b9e3aa
[ "bzip2-1.0.6" ]
456
2019-01-12T11:01:13.000Z
2022-03-30T17:06:52.000Z
import datetime import json import os import xlrd import logging from io import BytesIO from xlsxwriter.workbook import Workbook from xhtml2pdf import pisa from itertools import chain from django.contrib.auth.models import User from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from django.shortcuts import get_object_or_404, render from django.template.loader import get_template from django.views.decorators.csrf import csrf_exempt from django.template.loader import render_to_string from django.contrib.auth.decorators import login_required from applications.academic_procedures.models import MinimumCredits, Register, InitialRegistration, course_registration, AssistantshipClaim,Assistantship_status from applications.globals.models import (Designation, ExtraInfo, HoldsDesignation, DepartmentInfo) from .forms import AcademicTimetableForm, ExamTimetableForm, MinuteForm from .models import (Calendar, Course, Exam_timetable, Grades, Curriculum_Instructor,Constants, Meeting, Student, Student_attendance, Timetable,Curriculum) from applications.programme_curriculum.models import (CourseSlot, Course as Courses, Batch, Semester, Programme, Discipline) from applications.academic_procedures.views import acad_proced_global_context from applications.programme_curriculum.models import Batch @login_required def user_check(request): """ This function is used to check the type of user. It checkes the authentication of the user. @param: request - contains metadata about the requested page @variables: current_user - get user from request user_details - extract details of user from database desig_id - check for designation acadadmin - designation for Acadadmin final_user - final designation of request user """ try: current_user = get_object_or_404(User, username=request.user.username) user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first() desig_id = Designation.objects.all().filter(name='Upper Division Clerk') temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first() acadadmin = temp.working k = str(user_details).split() final_user = k[2] except Exception as e: acadadmin="" final_user="" pass if (str(acadadmin) != str(final_user)): return True else: return False def get_context(request): """ This function gets basic gata from database to send to template @param: request - contains metadata about the requested page @variables: acadTtForm - the form to add academic calender examTtForm - the form required to add exam timetable exam_t - all the exam timetable objects timetable - all the academic timetable objects calendar - all the academic calender objects context - the datas to be displayed in the webpage this_sem_course - tha data of thsi semester courses next_sem_courses - the data of next semester courses courses - all the courses in curriculum course_type - list the type of courses """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') course_list = sem_for_generate_sheet() if(course_list[0]==1): course_list_2 = [2, 4, 6, 8] else: course_list_2 = [1, 3, 5, 7] # examTtForm = ExamTimetableForm() # acadTtForm = AcademicTimetableForm() # calendar = Calendar.objects.all() # this_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True) # next_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True) # courses = Course.objects.all() # course_type = Constants.COURSE_TYPE # timetable = Timetable.objects.all() # exam_t = Exam_timetable.objects.all() procedures_context = acad_proced_global_context() try: examTtForm = ExamTimetableForm() acadTtForm = AcademicTimetableForm() calendar = Calendar.objects.all() this_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list).filter(floated=True) next_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list_2).filter(floated=True) courses = Course.objects.all() courses_list = Courses.objects.all() course_type = Constants.COURSE_TYPE timetable = Timetable.objects.all() exam_t = Exam_timetable.objects.all() pgstudent = Student.objects.filter(programme = "M.Tech") | Student.objects.filter(programme = "PhD") assistant_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval =True).filter(acad_approval = False) assistant_approve_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval =True).filter(hod_approval = True) assistant_list_length = len(assistant_list.filter(acad_approval = False)) assis_stat = Assistantship_status.objects.all() for obj in assis_stat: assistant_flag = obj.student_status hod_flag = obj.hod_status account_flag = obj.account_status except Exception as e: examTtForm = "" acadTtForm = "" calendar = "" this_sem_courses = "" next_sem_courses = "" courses = "" course_type = "" timetable = "" exam_t = "" pass context = { 'acadTtForm': acadTtForm, 'examTtForm': examTtForm, 'courses': courses, 'courses_list': courses_list, 'course_type': course_type, 'exam': exam_t, 'timetable': timetable, 'academic_calendar': calendar, 'next_sem_course': next_sem_courses, 'this_sem_course': this_sem_courses, 'curriculum': curriculum, 'pgstudent' : pgstudent, 'assistant_list' : assistant_list, 'assistant_approve_list' : assistant_approve_list, 'assistant_list_length' : assistant_list_length, 'tab_id': ['1','1'], 'context': procedures_context['context'], 'lists': procedures_context['lists'], 'date': procedures_context['date'], 'query_option1': procedures_context['query_option1'], 'query_option2': procedures_context['query_option2'], 'course_verification_date' : procedures_context['course_verification_date'], 'submitted_course_list' : procedures_context['submitted_course_list'], 'result_year' : procedures_context['result_year'], 'batch_grade_data' : procedures_context['batch_grade_data'], 'batch_branch_data' : procedures_context['batch_branch_data'], 'assistant_flag' : assistant_flag, 'hod_flag' : hod_flag, 'account_flag' : account_flag } return context @login_required def homepage(request): """ This function is used to set up the homepage of the application. It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: senates - the extraInfo objects that holds the designation as a senator students - all the objects in the Student class Convenor - the extraInfo objects that holds the designation as a convenor CoConvenor - the extraInfo objects that holds the designation as a coconvenor meetings - the all meeting objects held in senator meetings minuteForm - the form to add a senate meeting minutes acadTtForm - the form to add academic calender examTtForm - the form required to add exam timetable Dean - the extraInfo objects that holds the designation as a dean student - the students as a senator extra - all the extraInfor objects exam_t - all the exam timetable objects timetable - all the academic timetable objects calendar - all the academic calender objects department - all the departments in the college attendance - all the attendance objects of the students context - the datas to be displayed in the webpage """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context = get_context(request) return render(request, "ais/ais.html", context) # #################################### # # curriculum # # #################################### @login_required def curriculum(request): """ This function is used to see curriculum and edit entries in a curriculum. It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: request_batch - Batch from form request_branch - Branch from form request_programme - Programme from form request_sem - Semester from form curriculum - Get data about curriculum from database courses - get courses from database courses_type - get course types from database """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context = get_context(request) context['tab_id'][0]='6' if request.method == 'POST': try: request_batch = request.POST['batch'] request_branch = request.POST['branch'] request_programme = request.POST['programme'] request_sem = request.POST['sem'] except Exception as e: request_batch = "" request_branch = "" request_programme = "" request_sem = "" #for checking if the user has searched for any particular curriculum if request_batch == "" and request_branch == "" and request_programme=="" and request_sem=="": curriculum = None #Curriculum.objects.all() else: if int(request_sem) == 0: curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).order_by('sem') else: curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem= request_sem) # context={ # 'courses' : courses, # 'course_type' : course_type, # 'curriculum' : curriculum, # 'tab_id' :['3','1'] # } courses = Course.objects.all() course_type = Constants.COURSE_TYPE html = render_to_string('ais/curr_list.html',{'curriculum':curriculum,'courses':courses,'course_type':course_type},request) obj = json.dumps({'html':html}) #return render(request, "ais/ais.html", context) return HttpResponse(obj,content_type='application/json') else: return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) @login_required def add_curriculum(request): """ This function is used to add new curriculum in database It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: programme - programme from form.REQUEST batch - batch from form.REQUEST branch - branch from form.REQUEST sem - semester from form.REQUEST course_code - course_code from form.REQUEST course_name - course-name from form.REQUEST course_id - course_id from database credits - credits from form.REQUEST optional - optional from form.REQUEST course_type - course_type from form.REQUEST ins - data is stored in database """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context={ 'tab_id' :['3','2'] } if request.method == 'POST': i=0 new_curr=[] while True: if "semester_"+str(i) in request.POST: try: programme=request.POST['AddProgramme'] batch=request.POST['AddBatch'] branch=request.POST['AddBranch'] sem=request.POST["semester_"+str(i)] course_code=request.POST["course_code_"+str(i)] course_name=request.POST["course_name_"+str(i)] course_id=Course.objects.get(course_name=course_name) credits=request.POST["credits_"+str(i)] if "optional_"+str(i) in request.POST: optional=True else: optional=False course_type=request.POST["course_type_"+str(i)] except Exception as e: programme="" batch="" branch="" sem="" course_code="" course_name="" course_id="" credits="" optional="" course_type="" pass ins=Curriculum( programme=programme, batch=batch, branch=branch, sem=sem, course_code=course_code, course_id=course_id, credits=credits, optional=optional, course_type=course_type, ) new_curr.append(ins) else: break i+=1 Curriculum.objects.bulk_create(new_curr) curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme) courses = Course.objects.all() course_type = Constants.COURSE_TYPE context= { 'courses': courses, 'course_type': course_type, 'curriculum': curriculum, 'tab_id' :['3','2'] } return render(request, "ais/ais.html", context) else: return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) @login_required def edit_curriculum(request): """ This function is used to edit curriculum in database It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: programme - programme from form.REQUEST batch - batch from form.REQUEST branch - branch from form.REQUEST sem - semester from form.REQUEST course_code - course_code from form.REQUEST course_name - course-name from form.REQUEST course_id - course_id from database credits - credits from form.REQUEST optional - optional from form.REQUEST course_type - course_type from form.REQUEST ins - data is stored in database """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context={ 'tab_id' :['3','1'] } if request.method == 'POST': try: id=request.POST['id'] programme=request.POST['programme'] batch=request.POST['batch'] branch=request.POST['branch'] sem=request.POST["sem"] course_code=request.POST["course_code"] course_name=request.POST["course_id"] course_id=Course.objects.get(course_name=course_name) credits=request.POST["credits"] if request.POST['optional'] == "on": optional=True else: optional=False course_type=request.POST["course_type"] except Exception as e: id="" programme="" batch="" branch="" sem="" course_code="" course_name="" course_id="" credits="" optional="" course_type="" pass entry=Curriculum.objects.all().select_related().filter(curriculum_id=id).first() entry.programme=programme entry.batch=batch entry.branch=branch entry.sem=sem entry.course_code=course_code entry.course_id=course_id entry.credits=credits entry.optional=optional entry.course_type=course_type entry.save() curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme) courses = Course.objects.all() course_type = Constants.COURSE_TYPE context= { 'courses': courses, 'course_type': course_type, 'curriculum': curriculum, 'tab_id' :['3','1'] } return render(request, "ais/ais.html", context) else: return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) @login_required def delete_curriculum(request): """ This function is used to delete curriculum entry in database It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: dele - data being deleted from database """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context={ 'tab_id' :['3','1'] } if request.method == "POST": dele = Curriculum.objects.select_related().filter(curriculum_id=request.POST['id']) dele.delete() curriculum = Curriculum.objects.select_related().filter(branch = request.POST['branch']).filter(batch = request.POST['batch']).filter(programme= request.POST['programme']) courses = Course.objects.all() course_type = Constants.COURSE_TYPE context= { 'courses': courses, 'course_type': course_type, 'curriculum': curriculum, 'tab_id' :['3','1'] } return render(request, "ais/ais.html", context) return render(request, 'ais/ais.html', context) @login_required def next_curriculum(request): """ This function is used to decide curriculum for new batch. It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: programme - programme from form.REQUEST now - current date from system year - current year batch - batch form form curriculum - curriculum details form database ins - Inster data in database """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') if request.method == 'POST': programme = request.POST['programme'] now = datetime.datetime.now() year = int(now.year) batch = year-1 curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme) if request.POST['option'] == '1': new_curriculum=[] for i in curriculum: ins=Curriculum( programme=i.programme, batch=i.batch+1, branch=i.branch, sem=i.sem, course_code=i.course_code, course_id=i.course_id, credits=i.credits, optional=i.optional, course_type=i.course_type, ) new_curriculum.append(ins) Curriculum.objects.bulk_create(new_curriculum) elif request.POST['option'] == '2': new_curriculum=[] for i in curriculum: ins=Curriculum( programme=i.programme, batch=i.batch+1, branch=i.branch, sem=i.sem, course_code=i.course_code, course_id=i.course_id, credits=i.credits, optional=i.optional, course_type=i.course_type, ) new_curriculum.append(ins) Curriculum.objects.bulk_create(new_curriculum) batch=batch+1 curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme) context= { 'curriculumm' :curriculum, 'tab_id' :['3','3'] } return render(request, "ais/ais.html", context) else: context= { 'tab_id' :['3','2'] } return render(request, "ais/ais.html", context) context= { 'tab_id' :['3','1'] } return render(request, "ais/ais.html", context) @login_required def add_timetable(request): """ acad-admin can upload the time table(any type of) of the semester. @param: request - contains metadata about the requested page. @variables: acadTtForm - data of delete dictionary in post request timetable - all timetable from database exam_t - all exam timetable from database """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') timetable = Timetable.objects.all() exam_t = Exam_timetable.objects.all() context= { 'exam': exam_t, 'timetable': timetable, 'tab_id' :['10','1'] } acadTtForm = AcademicTimetableForm() if request.method == 'POST' and request.FILES: acadTtForm = AcademicTimetableForm(request.POST, request.FILES) if acadTtForm.is_valid(): acadTtForm.save() return render(request, "ais/ais.html", context) else: return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) @login_required def add_exam_timetable(request): """ acad-admin can upload the exam timtable of the ongoing semester. @param: request - contains metadata about the requested page. @variables: examTtForm - data of delete dictionary in post request timetable - all timetable from database exam_t - all exam timetable from database """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') timetable = Timetable.objects.all() exam_t = Exam_timetable.objects.all() context= { 'exam': exam_t, 'timetable': timetable, 'tab_id' :['10','2'] } examTtForm = ExamTimetableForm() if request.method == 'POST' and request.FILES: examTtForm = ExamTimetableForm(request.POST, request.FILES) if examTtForm.is_valid(): examTtForm.save() return render(request, "ais/ais.html", context) else: return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) @login_required def delete_timetable(request): """ acad-admin can delete the outdated timetable from the server. @param: request - contains metadata about the requested page. @variables: data - data of delete dictionary in post request t - Object of time table to be deleted """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') if request.method == "POST": data = request.POST['delete'] t = Timetable.objects.get(time_table=data) t.delete() return HttpResponse("TimeTable Deleted") @login_required def delete_exam_timetable(request): """ acad-admin can delete the outdated exam timetable. @param: request - contains metadata about the requested page. @variables: data - data of delete dictionary in post request t - Object of Exam time table to be deleted """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') if request.method == "POST": data = request.POST['delete'] t = Exam_timetable.objects.get(exam_time_table=data) t.delete() return HttpResponse("TimeTable Deleted") @login_required def add_calendar(request): """ to add an entry to the academic calendar to be uploaded @param: request - contains metadata about the requested page. @variables: from_date - The starting date for the academic calendar event. to_date - The ending date for the academic caldendar event. desc - Description for the academic calendar event. c = object to save new event to the academic calendar. """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') calendar = Calendar.objects.all() context= { 'academic_calendar' :calendar, 'tab_id' :['4','1'] } if request.method == "POST": try: from_date = request.POST.getlist('from_date') to_date = request.POST.getlist('to_date') desc = request.POST.getlist('description')[0] from_date = from_date[0].split('-') from_date = [int(i) for i in from_date] from_date = datetime.datetime(*from_date).date() to_date = to_date[0].split('-') to_date = [int(i) for i in to_date] to_date = datetime.datetime(*to_date).date() except Exception as e: from_date="" to_date="" desc="" pass c = Calendar( from_date=from_date, to_date=to_date, description=desc) c.save() HttpResponse("Calendar Added") return render(request, "ais/ais.html", context) @login_required def update_calendar(request): """ to update an entry to the academic calendar to be updated. @param: request - contains metadata about the requested page. @variables: from_date - The starting date for the academic calendar event. to_date - The ending date for the academic caldendar event. desc - Description for the academic calendar event. prev_desc - Description for the previous event which is to be updated. get_calendar_details = Get the object of the calendar instance from the database for the previous Description. """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') calendar = Calendar.objects.all() context= { 'academic_calendar' :calendar, 'tab_id' :['4','1'] } if request.method == "POST": try: from_date = request.POST.getlist('from_date') to_date = request.POST.getlist('to_date') desc = request.POST.getlist('description')[0] prev_desc = request.POST.getlist('prev_desc')[0] from_date = from_date[0].split('-') from_date = [int(i) for i in from_date] from_date = datetime.datetime(*from_date).date() to_date = to_date[0].split('-') to_date = [int(i) for i in to_date] to_date = datetime.datetime(*to_date).date() get_calendar_details = Calendar.objects.all().filter(description=prev_desc).first() get_calendar_details.description = desc get_calendar_details.from_date = from_date get_calendar_details.to_date = to_date get_calendar_details.save() except Exception as e: from_date="" to_date="" desc="" return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) #Generate Attendance Sheet def sem_for_generate_sheet(): """ This function generates semester grade sheet @variables: now - current datetime month - current month """ now = datetime.datetime.now() month = int(now.month) if month >= 7 and month <= 12: return [1, 3, 5, 7] else: return [2, 4, 6, 8] @login_required def generatexlsheet(request): """ to generate Course List of Registered Students @param: request - contains metadata about the requested page @variables: batch - gets the batch course - gets the course curr_key - gets the curriculum from database obj - get stdents data from database ans - Formatted Array to be converted to xlsx k -temporary array to add data to formatted array/variable output - io Bytes object to write to xlsx file book - workbook of xlsx file title - formatting variable of title the workbook subtitle - formatting variable of subtitle the workbook normaltext - formatting variable for normal text sheet - xlsx sheet to be rendered titletext - formatting variable of title text dep - temporary variables z - temporary variables for final output b - temporary variables for final output c - temporary variables for final output st - temporary variables for final output """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') try: batch = request.POST['batch'] course = Courses.objects.get(id = request.POST['course']) obj = course_registration.objects.all().filter(course_id = course) except Exception as e: batch="" course="" curr_key="" obj="" registered_courses = [] for i in obj: if i.student_id.batch_id.year == int(batch): registered_courses.append(i) ans = [] for i in registered_courses: k = [] k.append(i.student_id.id.id) k.append(i.student_id.id.user.first_name) k.append(i.student_id.id.user.last_name) k.append(i.student_id.id.department) ans.append(k) ans.sort() output = BytesIO() book = Workbook(output,{'in_memory':True}) title = book.add_format({'bold': True, 'font_size': 22, 'align': 'center', 'valign': 'vcenter'}) subtitle = book.add_format({'bold': True, 'font_size': 15, 'align': 'center', 'valign': 'vcenter'}) normaltext = book.add_format({'bold': False, 'font_size': 15, 'align': 'center', 'valign': 'vcenter'}) sheet = book.add_worksheet() title_text = ((str(course.name)+" : "+str(str(batch)))) sheet.set_default_row(25) sheet.merge_range('A2:E2', title_text, title) sheet.write_string('A3',"Sl. No",subtitle) sheet.write_string('B3',"Roll No",subtitle) sheet.write_string('C3',"Name",subtitle) sheet.write_string('D3',"Discipline",subtitle) sheet.write_string('E3','Signature',subtitle) sheet.set_column('A:A',20) sheet.set_column('B:B',20) sheet.set_column('C:C',60) sheet.set_column('D:D',15) sheet.set_column('E:E',30) k = 4 num = 1 for i in ans: sheet.write_number('A'+str(k),num,normaltext) num+=1 z,b,c = str(i[0]),i[1],i[2] name = str(b)+" "+str(c) temp = str(i[3]).split() dep = str(temp[len(temp)-1]) sheet.write_string('B'+str(k),z,normaltext) sheet.write_string('C'+str(k),name,normaltext) sheet.write_string('D'+str(k),dep,normaltext) k+=1 book.close() output.seek(0) response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel') st = 'attachment; filename = ' + course.code + '.xlsx' response['Content-Disposition'] = st return response @login_required def generate_preregistration_report(request): """ to generate preresgistration report after pre-registration @param: request - contains metadata about the requested page @variables: sem - get current semester from current time now - get current time year - getcurrent year batch - gets the batch from form sem - stores the next semester obj - All the registration details appended into one data - Formated data for context m - counter for Sl. No (in formated data) z - temporary array to add data to variable data k -temporary array to add data to formatted array/variable output - io Bytes object to write to xlsx file book - workbook of xlsx file title - formatting variable of title the workbook subtitle - formatting variable of subtitle the workbook normaltext - formatting variable for normal text sheet - xlsx sheet to be rendered titletext - formatting variable of title text dep - temporary variables z - temporary variables for final output b - temporary variables for final output c - temporary variables for final output st - temporary variables for final output """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') if request.method == "POST": sem = request.POST.get('semester_no') batch_id=request.POST.get('batch_branch') batch = Batch.objects.filter(id = batch_id).first() obj = InitialRegistration.objects.filter(student_id__batch_id=batch_id, semester_id__semester_no=sem) registered_students = set() unregistered_students = set() for stu in obj: registered_students.add(stu.student_id) students = Student.objects.filter(batch_id = batch_id) for stu in students: if stu not in registered_students: unregistered_students.add(stu) data = [] m = 1 for i in unregistered_students: z = [] z.append(m) m += 1 z.append(i.id.user.username) z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name)) z.append(i.id.department.name) z.append('not registered') data.append(z) for i in registered_students: z = [] z.append(m) m += 1 z.append(i.id.user.username) z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name)) z.append(i.id.department.name) z.append('registered') data.append(z) output = BytesIO() book = Workbook(output,{'in_memory':True}) title = book.add_format({'bold': True, 'font_size': 22, 'align': 'center', 'valign': 'vcenter'}) subtitle = book.add_format({'bold': True, 'font_size': 15, 'align': 'center', 'valign': 'vcenter'}) normaltext = book.add_format({'bold': False, 'font_size': 15, 'align': 'center', 'valign': 'vcenter'}) sheet = book.add_worksheet() title_text = ("Pre-registeration : "+ batch.name + str(" ") + batch.discipline.acronym + str(" ") + str(batch.year)) sheet.set_default_row(25) sheet.merge_range('A2:E2', title_text, title) sheet.write_string('A3',"Sl. No",subtitle) sheet.write_string('B3',"Roll No",subtitle) sheet.write_string('C3',"Name",subtitle) sheet.write_string('D3',"Discipline",subtitle) sheet.write_string('E3','Status',subtitle) sheet.set_column('A:A',20) sheet.set_column('B:B',20) sheet.set_column('C:C',50) sheet.set_column('D:D',15) sheet.set_column('E:E',15) k = 4 num = 1 for i in data: sheet.write_number('A'+str(k),num,normaltext) num+=1 z,b,c = str(i[0]),i[1],i[2] a,b,c,d,e = str(i[0]),str(i[1]),str(i[2]),str(i[3]),str(i[4]) temp = str(i[3]).split() sheet.write_string('B'+str(k),b,normaltext) sheet.write_string('C'+str(k),c,normaltext) sheet.write_string('D'+str(k),d,normaltext) sheet.write_string('E'+str(k),e,normaltext) k+=1 book.close() output.seek(0) response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel') st = 'attachment; filename = ' + batch.name + batch.discipline.acronym + str(batch.year) + '-preresgistration.xlsx' response['Content-Disposition'] = st return response @login_required def add_new_profile (request): """ To add details of new upcoming students in the database.User must be logged in and must be acadadmin @param: request - contains metadata about the requested page. @variables: profiles - gets the excel file having data excel - excel file sheet - sheet no in excel file roll_no - details of student from file first_name - details of student from file last_name - details of student from file email - details of student from file sex - details of student from file title - details of student from file dob - details of student from file fathers_name - details of student from file mothers_name - details of student from file category - details of student from file phone_no - details of student from file address - details of student from file department - details of student from file specialization - details of student from file hall_no - details of student from file programme - details of student from file batch - details of student from file user - new user created in database einfo - new extrainfo object created in database stud_data - new student object created in database desig - get designation object of student holds_desig - get hold_desig object of student currs - get curriculum details reg - create registeration object in registeration table """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context= { 'tab_id' :['2','1'] } if request.method == 'POST' and request.FILES: profiles=request.FILES['profiles'] excel = xlrd.open_workbook(file_contents=profiles.read()) sheet=excel.sheet_by_index(0) for i in range(sheet.nrows): roll_no=int(sheet.cell(i,0).value) first_name=str(sheet.cell(i,1).value) last_name=str(sheet.cell(i,2).value) email=str(sheet.cell(i,3).value) sex=str(sheet.cell(i,4).value) if sex == 'F': title='Ms.' else: title='Mr.' dob_tmp=sheet.cell(i,5).value dob_tmp=sheet.cell_value(rowx=i,colx=5) dob=datetime.datetime(*xlrd.xldate_as_tuple(dob_tmp,excel.datemode)) fathers_name=str(sheet.cell(i,6).value) mothers_name=str(sheet.cell(i,7).value) category=str(sheet.cell(i,8).value) phone_no=int(sheet.cell(i,9).value) address=str(sheet.cell(i,10).value) dept=str(sheet.cell(i,11).value) specialization=str(sheet.cell(i,12).value) hall_no=sheet.cell(i,13 ).value department=DepartmentInfo.objects.all().filter(name=dept).first() if specialization == "": specialization="None" if hall_no == None: hall_no=3 else: hall_no=int(hall_no) programme_name=request.POST['Programme'] batch_year=request.POST['Batch'] batch = Batch.objects.all().filter(name = programme_name, discipline__acronym = dept, year = batch_year).first() user = User.objects.create_user( username=roll_no, password='hello123', first_name=first_name, last_name=last_name, email=email, ) einfo = ExtraInfo.objects.create( id=roll_no, user=user, title=title, sex=sex, date_of_birth=dob, address=address, phone_no=phone_no, user_type='student', department=department, ) sem=1 stud_data = Student.objects.create( id=einfo, programme = programme_name, batch=batch_year, batch_id = batch, father_name = fathers_name, mother_name = mothers_name, cpi = 0, category = category, hall_no = hall_no, specialization = specialization, curr_semester_no=sem, ) desig = Designation.objects.get(name='student') hold_des = HoldsDesignation.objects.create( user=user, working=user, designation=desig, ) sem_id = Semester.objects.get(curriculum = batch.curriculum, semester_no = sem) course_slots = CourseSlot.objects.all().filter(semester = sem_id) courses = [] for course_slot in course_slots: courses += course_slot.courses.all() new_reg=[] for c in courses: reg=course_registration( course_id = c, semester_id=sem_id, student_id=stud_data ) new_reg.append(reg) course_registration.objects.bulk_create(new_reg) else: return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) def get_faculty_list(): """ to get faculty list from database @param: request - contains metadata about the requested page. @variables: f1,f2,f3 - temporary varibles faculty - details of faculty of data faculty_list - list of faculty """ try: f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor")) f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor")) f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor")) except Exception as e: f1=f2=f3="" pass faculty = list(chain(f1,f2,f3)) faculty_list = [] for i in faculty: faculty_list.append(i) return faculty_list @login_required def float_course(request): """ to float courses for the next sem and store data in databsae. User must be logged in and must be acadadmin @param: request - contains metadata about the requested page. @variables: request_batch - Batch from form request_branch - Branch from form request_programme - Programme from form request_sem - Semester from form """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context= { 'tab_id' :['5','1'] } if request.method == 'POST': try: request_batch = request.POST['batch'] request_branch = request.POST['branch'] request_programme = request.POST['programme'] except Exception as e: request_batch = "" request_branch = "" request_programme = "" if request_batch == "" and request_branch == "" and request_programme=="": curriculum = None #Curriculum.objects.all() else: sem = sem_for_generate_sheet() now = datetime.datetime.now() year = int(now.year) if sem[0] == 2: sem = sem[year-int(request_batch)-1] else: sem = sem[year-int(request_batch)] sem+=1 curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem=sem).order_by('course_code') faculty_list = get_faculty_list() courses = Course.objects.all() course_type = Constants.COURSE_TYPE context= { 'courses': courses, 'course_type': course_type, 'curriculum': curriculum, 'faculty_list': faculty_list, 'tab_id' :['5','1'] } return render(request, "ais/ais.html", context) else: return render(request, "ais/ais.html", context) return render(request, "ais/ais.html", context) @login_required def float_course_submit(request): """ to float courses for the next sem and store data in databsae. User must be logged in and must be acadadmin @param: request - contains metadata about the requested page. @variables: request_batch - Batch from form request_branch - Branch from form request_programme - Programme from form request_sem - Semester from form """ if user_check(request): return HttpResponseRedirect('/academic-procedures/') context= { 'tab_id' :['5','1'] } if request.method == "POST": i=1 while True: if str(i)+"_ccode" in request.POST: if str(i)+"_fac" in request.POST: if request.POST[str(i)+"_fac"] == "" : logging.warning("No faculty") else: flot = Curriculum.objects.select_related().get(curriculum_id=request.POST[str(i)+"_ccode"]) flot.floated = True flot.save() new_curr_inst=[] for c,i in enumerate(request.POST.getlist(str(i)+'_fac')): inst = get_object_or_404(User, username = i) inst = ExtraInfo.objects.select_related('user','department').get(user=inst) if c==0: ins=Curriculum_Instructor( curriculum_id=flot, instructor_id=inst, chief_inst=True, ) new_curr_inst.append(ins) else: ins=Curriculum_Instructor( curriculum_id=flot, instructor_id=inst, chief_inst=False, ) new_curr_inst.append(ins) Curriculum_Instructor.objects.bulk_create(new_curr_inst) else: break i+=1 return render(request, "ais/ais.html", context) # # ---------------------senator------------------ # @csrf_exempt def senator(request): # """ # to add a new student senator # @param: # request - contains metadata about the requested page # @variables: # current_user - gets the data of current user. # user_details - gets the details of the required user. # desig_id - used to check the designation ID. # extraInfo - extraInfo object of the student with that rollno # s - designation object of senator # hDes - holdsDesignation object to store that the particualr student is holding the senator designation # student - the student object of the new senator # data - data of the student to be displayed in teh webpage # """ # current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first() #print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == 'POST': # print(request.POST, ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") # rollno = request.POST.getlist('Roll Number')[0] # # print(request.POST.get('rollno')) # extraInfo = ExtraInfo.objects.get(id=rollno) # s = Designation.objects.get(name='Senator') # hDes = HoldsDesignation() # hDes.user = extraInfo.user # hDes.working = extraInfo.user # hDes.designation = s # hDes.save() # student = Student.objects.get(id=extraInfo) # data = { # 'name': extraInfo.user.username, # 'rollno': extraInfo.id, # 'programme': student.programme, # 'branch': extraInfo.department.name # } # return HttpResponseRedirect('/aims/') # # return JsonResponse(data) # else: # return HttpResponseRedirect('/aims/') # @csrf_exempt def deleteSenator(request, pk): # """ # to remove a senator from the position # @param: # request - contains metadata about the requested page # @variables: # s - the designation object that contains senator # student - the list students that is a senator # hDes - the holdDesignation object that stores the # information that the particular student is a senator # """ pass # if request.POST: # s = get_object_or_404(Designation, name="Senator") # student = get_object_or_404(ExtraInfo, id=request.POST.getlist("senate_id")[0]) # hDes = get_object_or_404( HoldsDesignation, user = student.user) # hDes.delete() # return HttpResponseRedirect('/aims/') # else: # return HttpResponseRedirect('/aims/')# #################################################### # # ##########covenors and coconvenors################## # @csrf_exempt def add_convenor(request): # """ # to add a new student convenor/coconvenor # @param: # request - contains metadata about the requested page # @variables: # rollno - rollno of the student to become the convenor/coconvenor # extraInfo - extraInfo object of the student with that rollno # s - designation object of Convenor # p - designation object of Co Convenor # result - the data that contains where the student will become # convenor or coconvenor # hDes - holdsDesignation object to store that the particualr student is # holding the convenor/coconvenor designation # student - the student object of the new convenor/coconvenor # data - data of the student to be displayed in the webpage # """ s = Designation.objects.get(name='Convenor') # p = Designation.objects.get(name='Co Convenor') # if request.method == 'POST': # rollno = request.POST.get('rollno_convenor') # extraInfo = ExtraInfo.objects.get(id=rollno) # s = Designation.objects.get(name='Convenor') # p = Designation.objects.get(name='Co Convenor') # result = request.POST.get('designation') # hDes = HoldsDesignation() # hDes.user = extraInfo.user # hDes.working = extraInfo.user # if result == "Convenor": # hDes.designation = s # else: # hDes.designation = p # hDes.save() # data = { # 'name': extraInfo.user.username, # 'rollno_convenor': extraInfo.id, # 'designation': hDes.designation.name, # } # return JsonResponse(data) # else: # data = {} # return JsonResponse(data) # @csrf_exempt def deleteConvenor(request, pk): # """ # to remove a convenor/coconvenor from the position # @param: # request - contains metadata about the requested page # pk - the primary key of that particular student field # @variables: # s - the designation object that contains convenor # c - the designation object that contains co convenor # student - the student object with the given pk # hDes - the holdDesignation object that stores the # information that the particular student is a convenor/coconvenor to be deleted # data - data of the student to be hidden in the webpage # """ # s = get_object_or_404(Designation, name="Convenor") c = get_object_or_404(Designation, name="Co Convenor") # student = get_object_or_404(ExtraInfo, id=pk) # hDes = HoldsDesignation.objects.filter(user = student.user) # designation = [] # for des in hDes: # if des.designation == s or des.designation == c: # designation = des.designation.name # des.delete() # data = { # 'id': pk, # 'designation': designation, # } # return JsonResponse(data)# ###################################################### # # ##########Senate meeting Minute################## # @csrf_exempt def addMinute(request): # """ # to add a new senate meeting minute object to the database. # @param: # request - contains metadata about the requested page # @variables: # current_user - details of the current user. # desig_id - to check the designation of the user. # user_details - to get the details of the required user. # """ # current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first() # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == 'POST' and request.FILES: # form = MinuteForm(request.POST, request.FILES) # if form.is_valid(): # form.save() # return HttpResponse('sucess') # else: # return HttpResponse('not uploaded') # return render(request, "ais/ais.html", {}) def deleteMinute(request): # """ # to delete an existing senate meeting minute object from the database. # @param: # request - contains metadata about the requested page # @variables: # data - the id of the minute object to be deleted # t - the minute object received from id to be deleted # """ # if request.method == "POST": # data = request.POST['delete'] # t = Meeting.objects.get(id=data) # t.delete() return HttpResponseRedirect('/aims/') # # ###################################################### # # ##########Student basic profile################## # @csrf_exempt def add_basic_profile(request): # """ # It adds the basic profile information like username,password, name, # rollno, etc of a student # @param: # request - contains metadata about the requested page # @variables: # name - the name of the student # roll - the rollno of the student # batch - the current batch of the student # programme - the programme the student is enrolled in # ph - the phone number of the student # """ if request.method == "POST": name = request.POST.get('name') # roll = ExtraInfo.objects.get(id=request.POST.get('rollno')) # programme = request.POST.get('programme') # batch = request.POST.get('batch') # ph = request.POST.get('phoneno') # if not Student.objects.filter(id=roll).exists(): # db = Student() # st = ExtraInfo.objects.get(id=roll.id) # db.name = name.upper() # db.id = roll # db.batch = batch # db.programme = programme # st.phone_no = ph # db.save() # st.save() # data = { # 'name': name, # 'rollno': roll.id, # 'programme': programme, # 'phoneno': ph, # 'batch': batch # } # print(data) # return JsonResponse(data) # else: # data = {} # return JsonResponse(data) # else: # data = {} # return JsonResponse(data) # @csrf_exempt def delete_basic_profile(request, pk): # """ # Deletes the student from the database # @param: # request - contains metadata about the requested page # pk - the primary key of the student's record in the database table # @variables: # e - the extraInfo objects of the student # user - the User object of the student # s - the student object of the student # """ e = get_object_or_404(ExtraInfo, id=pk) # user = get_object_or_404(User, username = e.user.username) # s = get_object_or_404(Student, id=e) # data = { # 'rollno': pk, # } # s.delete() # e.delete() # u.delete() # return JsonResponse(data)# ######################################################### # ''' # # view to add attendance data to database # def curriculum(request): # ''' def delete_advanced_profile(request): # """ # to delete the advance information of the student # @param: # request - contains metadata about the requested page # @variables: # current_user - the username of the logged in user # user_details - the details of the current user # desig_id - checking the designation of the current user # acadadmin - deatils of the acad admin # s - the student object from the requested rollno # """ current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') # temp = HoldsDesignation.objects.all().filter(designation = desig_id).first() # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # st = request.POST['delete'] # arr = st.split("-") # stu = arr[0] # if Student.objects.get(id=stu): # s = Student.objects.get(id=stu) # s.father_name = "" # s.mother_name = "" # s.hall_no = 1 # s.room_no = "" # s.save() # else: # return HttpResponse("Data Does Not Exist") # return HttpResponse("Data Deleted Successfully") def add_advanced_profile(request): # """ # It adds the advance profile information like hall no, room no, # profile picture, about me etc of a student # @param: # request - contains metadata about the requested page # @variables: # current_user - the username of the logged in user # user_details - the details of the current user # desig_id - checking the designation of the current user # acadadmin - deatils of the acad admin # father - father's name of the student # rollno - the rollno of the student required to check if the student is available # mother - mother's name of the student # add - student's address # cpi - student's cpi # hall - hall no of where the student stays # room no - hostel room no # """ current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') # temp = HoldsDesignation.objects.all().filter(designation = desig_id).first() # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # print(request.POST) # rollno=request.POST.get('roll') # print(rollno) # student = ExtraInfo.objects.get(id=rollno) # print(student.address) # if not student: # data = {} # return JsonResponse(data) # else: # father = request.POST.get('father') # mother = request.POST.get('mother') # add = request.POST.get('address') # hall = request.POST.get('hall') # room = request.POST.get('room') # cpi = request.POST.get('cpi') # student.address = str(hall) + " " + str(room) # student.save() # s = Student.objects.get(id=student) # s.father_name=father # s.mother_name=mother # s.hall_no = hall # s.room_no = room # s.save() # return HttpResponseRedirect('/academic-procedures/') # return HttpResponseRedirect('/academic-procedures/') def add_optional(request): # """ # acadmic admin to update the additional courses # @param: # request - contains metadata about the requested page. # @variables: # choices - selected addtional courses by the academic person. # course - Course details which is selected by the academic admin. # """ if request.method == "POST": pass # print(request.POST) # choices = request.POST.getlist('choice') # for i in choices: # course = Course.objects.all().filter(course_id=i).first() # course.acad_selection = True # course.save() # courses = Course.objects.all() # for i in courses: # if i.course_id not in choices: # i.acad_selection = False # i.save() # return HttpResponseRedirect('/academic-procedures/') def min_cred(request): # """ # to set minimum credit for a current semester that a student must take # @param: # request - contains metadata about the requested page. # @variables: # sem_cred = Get credit details from forms and the append it to an array. # sem - Get the object for the minimum credits from the database and the update it. # """ if request.method=="POST": sem_cred = [] # sem_cred.append(0) # for i in range(1, 10): # sem = "sem_"+"1" # sem_cred.append(request.POST.getlist(sem)[0]) # for i in range(1, 9): # sem = MinimumCredits.objects.all().filter(semester=i).first() # sem.credits = sem_cred[i+1] # sem.save() # return HttpResponse("Worked") def view_course(request): # if request.method == "POST": # programme=request.POST['programme'] # batch=request.POST['batch'] # branch=request.POST['branch'] # sem=request.POST['sem'] # curriculum_courses = Curriculum.objects.filter(branch = branch).filter(batch = batch).filter(programme= programme).filter(sem = sem) # print(curriculum_courses) # courses = Course.objects.all() # course_type = Constants.COURSE_TYPE # context= { # 'courses': courses, # 'course_type': course_type, # 'curriculum_course': curriculum_courses, # } # return render(request, "ais/ais.html", context) # else: # return render(request, "ais/ais.html") return render(request, "ais/ais.html") def delete_grade(request): # """ # It deletes the grade of the student # @param: # request - contains metadata about the requested page # @variables: # current_user - father's name of the student # user_details - the rollno of the student required to check if the student is available # desig_id - mother 's name of the student # acadadmin - student's address # final_user - details of the user # sem - current semester of the student # data - tag whether to delete it or not # course - get the course details # """ # current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') # temp = HoldsDesignation.objects.all().filter(designation = desig_id).first() # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # print(request.POST['delete']) # data = request.POST['delete'] # d = data.split("-") # id = d[0] # course = d[2] # sem = int(d[3]) # if request.method == "POST": # if(Grades.objects.filter(student_id=id, sem=sem)): # s = Grades.objects.filter(student_id=id, sem=sem) # for p in s: # if (str(p.course_id) == course): # print(p.course_id) # p.delete() # else: # return HttpResponse("Unable to delete data") return HttpResponse("Data Deleted SuccessFully") @login_required def verify_grade(request): """ It verify the grades of the student @param: request - contains metadata about the requested page @variables: current_user - father's name of the student user_details - the rollno of the student required to check if the student is available desig_id - mother's name of the student acadadmin - student's address subject - subject of which the grade has to be added sem - semester of the student grade - grade to be added in the student course - course ofwhich the grade is added """ # if user_check(request): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # curr_id=request.POST['course'] # print(curr_id) # curr_course = Curriculum.objects.filter(curriculum_id=curr_id) # grades = Grades.objects.filter(curriculum_id=curr_course) # context= { # 'grades': grades, # 'tab_id' :"2" # } # return render(request,"ais/ais.html", context) # else: # return HttpResponseRedirect('/aims/') return HttpResponseRedirect('/aims/') def confirm_grades(request): # if user_check(request): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # print("confirm hone wala hai") # print(request.POST) return HttpResponseRedirect('/aims/')
35.826674
199
0.591761
7,731
69,038
5.165438
0.071401
0.024791
0.016652
0.019282
0.622202
0.581109
0.55484
0.526794
0.509015
0.486828
0
0.005869
0.30153
69,038
1,926
200
35.845275
0.822277
0.435094
0
0.525172
0
0
0.081307
0.015799
0
0
0
0
0
1
0.042334
false
0.010297
0.026316
0.004577
0.140732
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2c8b21ea65c7fbe7c24c113fc96385ffdf77cb
1,245
py
Python
subject/tests/functional/test_glance_replicator.py
laoyigrace/subject
e6ed989fdc250917a19788112b22322b73b3550f
[ "Apache-2.0" ]
null
null
null
subject/tests/functional/test_glance_replicator.py
laoyigrace/subject
e6ed989fdc250917a19788112b22322b73b3550f
[ "Apache-2.0" ]
null
null
null
subject/tests/functional/test_glance_replicator.py
laoyigrace/subject
e6ed989fdc250917a19788112b22322b73b3550f
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test cases for subject-replicator""" import sys from subject.tests import functional from subject.tests.utils import execute class TestGlanceReplicator(functional.FunctionalTest): """Functional tests for subject-replicator""" def test_compare(self): # Test for issue: https://bugs.launchpad.net/glance/+bug/1598928 cmd = ('%s -m subject.cmd.replicator ' 'compare az1:9292 az2:9292 --debug' % (sys.executable,)) exitcode, out, err = execute(cmd, raise_error=False) self.assertIn( 'Request: GET http://az1:9292/v1/subjects/detail?is_public=None', err )
36.617647
78
0.683534
164
1,245
5.170732
0.646341
0.070755
0.03066
0.037736
0
0
0
0
0
0
0
0.028008
0.225703
1,245
33
79
37.727273
0.85166
0.560643
0
0
0
0
0.23619
0.041905
0
0
0
0
0.076923
1
0.076923
false
0
0.230769
0
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2d310413da9e85779ff39fa32f7bd2c4075553
8,648
py
Python
test/countries/test_united_states.py
OmoMicheal/marketanalysis
ddc2476ec918a28658e64574e89d8944cee75617
[ "MIT" ]
2
2021-06-29T21:56:15.000Z
2022-02-17T22:10:55.000Z
test/countries/test_united_states.py
OmoMicheal/marketanalysis
ddc2476ec918a28658e64574e89d8944cee75617
[ "MIT" ]
null
null
null
test/countries/test_united_states.py
OmoMicheal/marketanalysis
ddc2476ec918a28658e64574e89d8944cee75617
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # marketanalysis # ---------------- # A fast, efficient Python library for generating country, province and state # specific sets of marketmarketholidayss on the fly. It aims to make determining whether a # specific date is a holiday as fast and flexible as possible. # # Author: MichealOmojola <[email protected]> # Website: https://github.com/OmoMicheal/trading_days # License: MIT (see LICENSE file) # Version: 0.1 (April 7, 2021) import unittest from datetime import date from dateutil.relativedelta import relativedelta # import sys # sys.path.insert(0, 'C:/Users/momojola/projects/marketanalysis/marketanalysis/') from marketanalysis import marketholidays from marketanalysis import markettradingdays class TestUS(unittest.TestCase): def setUp(self): self.marketholidayss = marketholidays.USA(observed=False) self.markettradingdayss = markettradingdays.USA() def test_new_years(self): self.assertNotIn(date(2010, 12, 31), self.marketholidayss) self.assertNotIn(date(2017, 1, 2), self.marketholidayss) self.marketholidayss.observed = True self.assertIn(date(2010, 12, 31), self.marketholidayss) self.assertIn(date(2017, 1, 2), self.marketholidayss) self.marketholidayss.observed = False for year in range(1900, 2100): dt = date(year, 1, 1) self.assertIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) def test_martin_luther(self): for dt in [ date(1986, 1, 20), date(1999, 1, 18), date(2000, 1, 17), date(2012, 1, 16), date(2013, 1, 21), date(2014, 1, 20), date(2015, 1, 19), date(2016, 1, 18), date(2020, 1, 20), ]: self.assertIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) def test_washingtons_birthday(self): de_marketholidayss = marketholidays.US() for dt in [ date(1969, 2, 22), date(1970, 2, 22), date(1971, 2, 15), date(1997, 2, 17), date(1999, 2, 15), date(2000, 2, 21), date(2012, 2, 20), date(2013, 2, 18), date(2014, 2, 17), date(2015, 2, 16), date(2016, 2, 15), date(2020, 2, 17), ]: self.assertIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) self.assertIn(dt, de_marketholidayss) self.assertEqual(marketholidays.US().get("2015-02-16"), "Presidents' Day") def test_good_friday(self): marketholidayss_US = marketholidays.US() for dt in [ date(1900, 4, 13), date(1901, 4, 5), date(1902, 3, 28), date(1999, 4, 2), date(2000, 4, 21), date(2010, 4, 2), date(2018, 3, 30), date(2019, 4, 19), date(2020, 4, 10), ]: self.assertIn(dt, self.marketholidayss) self.assertIn(dt, marketholidayss_US) def test_memorial_day(self): for dt in [ date(1969, 5, 30), date(1970, 5, 30), date(1971, 5, 31), date(1997, 5, 26), date(1999, 5, 31), date(2000, 5, 29), date(2012, 5, 28), date(2013, 5, 27), date(2014, 5, 26), date(2015, 5, 25), date(2016, 5, 30), date(2020, 5, 25), ]: self.assertIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) def test_independence_day(self): for year in range(1900, 2100): dt = date(year, 7, 4) self.assertIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) self.assertNotIn(date(2010, 7, 5), self.marketholidayss) self.assertNotIn(date(2020, 7, 3), self.marketholidayss) self.marketholidayss.observed = True self.assertIn(date(2010, 7, 5), self.marketholidayss) self.assertIn(date(2020, 7, 3), self.marketholidayss) def test_labor_day(self): for dt in [ date(1997, 9, 1), date(1999, 9, 6), date(2000, 9, 4), date(2012, 9, 3), date(2013, 9, 2), date(2014, 9, 1), date(2015, 9, 7), date(2016, 9, 5), date(2020, 9, 7), ]: self.assertIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) def test_thanksgiving_day(self): for dt in [ date(1997, 11, 27), date(1999, 11, 25), date(2000, 11, 23), date(2012, 11, 22), date(2013, 11, 28), date(2014, 11, 27), date(2015, 11, 26), date(2016, 11, 24), date(2020, 11, 26), ]: self.assertNotIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) def test_christmas_eve(self): as_marketholidayss = marketholidays.US() self.marketholidayss.observed = False for year in range(1900, 2050): self.assertNotIn(date(year, 12, 24), self.marketholidayss) # self.assertIn(date(year, 12, 24), as_marketholidayss) self.assertNotIn(date(2016, 12, 23), as_marketholidayss) self.assertNotIn( "Christmas Eve (Observed)", as_marketholidayss.get_list(date(2017, 12, 22)), ) def test_christmas_day(self): for year in range(1900, 2100): dt = date(year, 12, 25) self.assertIn(dt, self.marketholidayss) self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss) self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss) self.assertNotIn(date(2010, 12, 24), self.marketholidayss) self.assertNotIn(date(2016, 12, 26), self.marketholidayss) self.marketholidayss.observed = True self.assertIn(date(2010, 12, 24), self.marketholidayss) self.assertIn(date(2016, 12, 26), self.marketholidayss) def test_day_after_christmas(self): nc_marketholidayss = marketholidays.US(observed=False) self.assertNotIn(date(2015, 12, 28), nc_marketholidayss) self.assertNotIn(date(2016, 12, 27), nc_marketholidayss) nc_marketholidayss.observed = True def test_new_years_eve(self): ky_marketholidayss = marketholidays.US() self.assertNotIn(date(2012, 12, 31), ky_marketholidayss) for dt in [date(2013, 12, 31), date(2016, 12, 30)]: self.assertNotIn(dt, self.marketholidayss) self.assertNotIn(dt, ky_marketholidayss) def test_future_list(self): current_date = '2021-04-13' lookup_step = 10 self.assertIn(date(2021, 4, 16), self.markettradingdayss.future_list(current_date, lookup_step)) self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.future_list(current_date, lookup_step)) def test_prevDays(self): current_date = '2021-04-13' lookback_step = 4 self.assertIn(date(2021, 4, 9), self.markettradingdayss.prevDays(current_date, lookback_step)) self.assertNotIn(date(2021, 4, 11), self.markettradingdayss.prevDays(current_date, lookback_step)) def test_BtwDates(self): current_date = '2021-04-13' future_date = '2021-04-20' self.assertIn(date(2021, 4, 15), self.markettradingdayss.BtwDates(current_date, future_date)) self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.BtwDates(current_date, future_date)) # if __name__ == "__main__": # unittest.main()
38.096916
107
0.591929
1,012
8,648
4.98419
0.172925
0.173275
0.145916
0.148295
0.569191
0.537669
0.464116
0.376289
0.327121
0.296788
0
0.115515
0.285268
8,648
227
108
38.096916
0.700534
0.074006
0
0.275281
0
0
0.011136
0
0
0
0
0
0.303371
1
0.089888
false
0
0.02809
0
0.123596
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2eae99a5174fb4958d90cc6f7e9618fa70f6e1
2,414
py
Python
unsorted/linked_list.py
AlgoArt/algoart
7a7a28f099351a6b6c1b360c794f697881c7e429
[ "Unlicense" ]
1
2015-09-20T06:35:58.000Z
2015-09-20T06:35:58.000Z
unsorted/linked_list.py
algoart/algoart
7a7a28f099351a6b6c1b360c794f697881c7e429
[ "Unlicense" ]
null
null
null
unsorted/linked_list.py
algoart/algoart
7a7a28f099351a6b6c1b360c794f697881c7e429
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python # linked_list.py - Linked list implementation in Python by Sergey 2015 """ Linked list implementation in Python """ # Standard modules import unittest import sys import os import argparse import re import random import subprocess import getpass import shutil # Additional modules ############################################################################### # Linked_list Class ############################################################################### class Node: def __init__(self, value, tail): self.value = value self.next = tail class Linked_list: """ Linked_list representation """ def __init__(self): """ Default constructor """ self.list = None def insert(self, value): self.list = Node(value, self.list) def start_iter(self): return self.list def next_iter(self, iter): if iter is not None: return iter.next else: return iter def tolist(self): result = [] iter = self.start_iter() while True: result.append(iter.value) iter = self.next_iter(iter) if not iter: break return result def run(self, test=False): """ Main execution function """ if test: return ############################################################################### # Executable code ############################################################################### def main(): # Sandbox sb = Linked_list(" ".join(sys.argv[1:])) sb.run() ############################################################################### # Unit Tests ############################################################################### class unitTests(unittest.TestCase): def test_Linked_list_class__basic_functionality(self): """ Linked_list class basic testing """ d = Linked_list() self.assertEqual(d.list, None) d.insert(1) self.assertEqual(d.list.value, 1) d.insert(2) self.assertEqual(d.list.next.value, 1) iter = d.start_iter() self.assertEqual(iter.value, 2) iter = d.next_iter(iter) self.assertEqual(iter.value, 1) self.assertEqual(d.tolist(), [2, 1]) if __name__ == "__main__": if sys.argv[-1] == "-ut": unittest.main(argv=[" "]) main()
22.773585
79
0.474731
238
2,414
4.668067
0.319328
0.090009
0.057606
0.054005
0.108011
0
0
0
0
0
0
0.007642
0.241094
2,414
105
80
22.990476
0.598799
0.133389
0
0
0
0
0.008238
0
0
0
0
0
0.105263
1
0.157895
false
0.017544
0.157895
0.017544
0.45614
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2f2c6d37a1cc224033909e079c7c6469595c55
8,555
py
Python
examples/seismic/viscoacoustic/wavesolver.py
speglich/devito
b636f7694eb6a1e19b0f2c48f44ff63613029a7b
[ "MIT" ]
1
2020-01-31T10:35:49.000Z
2020-01-31T10:35:49.000Z
examples/seismic/viscoacoustic/wavesolver.py
speglich/devito
b636f7694eb6a1e19b0f2c48f44ff63613029a7b
[ "MIT" ]
52
2020-10-12T19:29:09.000Z
2022-03-10T14:05:22.000Z
examples/seismic/viscoacoustic/wavesolver.py
alisiahkoohi/devito
f535a44dff12de2837eb6e3217a65ffb2d371cb8
[ "MIT" ]
1
2020-06-02T03:31:11.000Z
2020-06-02T03:31:11.000Z
from devito import VectorTimeFunction, TimeFunction, NODE from devito.tools import memoized_meth from examples.seismic import PointSource from examples.seismic.viscoacoustic.operators import (ForwardOperator, AdjointOperator) class ViscoacousticWaveSolver(object): """ Solver object that provides operators for seismic inversion problems and encapsulates the time and space discretization for a given problem setup. Parameters ---------- model : Model Physical model with domain parameters. geometry : AcquisitionGeometry Geometry object that contains the source (SparseTimeFunction) and receivers (SparseTimeFunction) and their position. space_order : int, optional Order of the spatial stencil discretisation. Defaults to 4. kernel : selects a visco-acoustic equation from the options below: 'sls' (Standard Linear Solid) : 1st order - Blanch and Symes (1995) / Dutta and Schuster (2014) viscoacoustic equation 2nd order - Bai et al. (2014) viscoacoustic equation 'ren' - Ren et al. (2014) viscoacoustic equation 'deng_mcmechan' - Deng and McMechan (2007) viscoacoustic equation Defaults to 'sls' 2nd order. """ def __init__(self, model, geometry, space_order=4, kernel='sls', time_order=2, **kwargs): self.model = model self.model._initialize_bcs(bcs="mask") self.geometry = geometry self.space_order = space_order self.kernel = kernel self.time_order = time_order self._kwargs = kwargs @property def dt(self): return self.model.critical_dt @memoized_meth def op_fwd(self, save=None): """Cached operator for forward runs with buffered wavefield""" return ForwardOperator(self.model, save=save, geometry=self.geometry, space_order=self.space_order, kernel=self.kernel, time_order=self.time_order, **self._kwargs) @memoized_meth def op_adj(self): """Cached operator for adjoint runs""" return AdjointOperator(self.model, save=None, geometry=self.geometry, space_order=self.space_order, kernel=self.kernel, time_order=self.time_order, **self._kwargs) def forward(self, src=None, rec=None, v=None, r=None, p=None, qp=None, b=None, vp=None, save=None, **kwargs): """ Forward modelling function that creates the necessary data objects for running a forward modelling operator. Parameters ---------- src : SparseTimeFunction or array_like, optional Time series data for the injected source term. rec : SparseTimeFunction or array_like, optional The interpolated receiver data. v : VectorTimeFunction, optional The computed particle velocity. r : TimeFunction, optional The computed memory variable. p : TimeFunction, optional Stores the computed wavefield. qp : Function, optional The P-wave quality factor. b : Function, optional The time-constant inverse density. vp : Function or float, optional The time-constant velocity. save : bool, optional Whether or not to save the entire (unrolled) wavefield. Returns ------- Receiver, wavefield and performance summary """ # Source term is read-only, so re-use the default src = src or self.geometry.src # Create a new receiver object to store the result rec = rec or self.geometry.rec # Create all the fields v, p, r save_t = src.nt if save else None if self.time_order == 1: v = v or VectorTimeFunction(name="v", grid=self.model.grid, save=save_t, time_order=self.time_order, space_order=self.space_order) kwargs.update({k.name: k for k in v}) # Create the forward wavefield if not provided p = p or TimeFunction(name="p", grid=self.model.grid, save=save_t, time_order=self.time_order, space_order=self.space_order, staggered=NODE) # Memory variable: r = r or TimeFunction(name="r", grid=self.model.grid, save=save_t, time_order=self.time_order, space_order=self.space_order, staggered=NODE) # Pick physical parameters from model unless explicitly provided b = b or self.model.b qp = qp or self.model.qp # Pick vp from model unless explicitly provided vp = vp or self.model.vp if self.kernel == 'sls': # Execute operator and return wavefield and receiver data # With Memory variable summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, r=r, p=p, b=b, vp=vp, dt=kwargs.pop('dt', self.dt), **kwargs) else: # Execute operator and return wavefield and receiver data # Without Memory variable summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, p=p, b=b, vp=vp, dt=kwargs.pop('dt', self.dt), **kwargs) return rec, p, v, summary def adjoint(self, rec, srca=None, va=None, pa=None, vp=None, qp=None, b=None, r=None, **kwargs): """ Adjoint modelling function that creates the necessary data objects for running an adjoint modelling operator. Parameters ---------- rec : SparseTimeFunction or array-like The receiver data. Please note that these act as the source term in the adjoint run. srca : SparseTimeFunction or array-like The resulting data for the interpolated at the original source location. va : VectorTimeFunction, optional The computed particle velocity. pa : TimeFunction, optional Stores the computed wavefield. vp : Function or float, optional The time-constant velocity. qp : Function, optional The P-wave quality factor. b : Function, optional The time-constant inverse density. r : TimeFunction, optional The computed memory variable. Returns ------- Adjoint source, wavefield and performance summary. """ # Create a new adjoint source and receiver symbol srca = srca or PointSource(name='srca', grid=self.model.grid, time_range=self.geometry.time_axis, coordinates=self.geometry.src_positions) if self.time_order == 1: va = va or VectorTimeFunction(name="va", grid=self.model.grid, time_order=self.time_order, space_order=self.space_order) kwargs.update({k.name: k for k in va}) pa = pa or TimeFunction(name="pa", grid=self.model.grid, time_order=self.time_order, space_order=self.space_order, staggered=NODE) # Memory variable: r = r or TimeFunction(name="r", grid=self.model.grid, time_order=self.time_order, space_order=self.space_order, staggered=NODE) b = b or self.model.b qp = qp or self.model.qp # Pick vp from model unless explicitly provided vp = vp or self.model.vp # Execute operator and return wavefield and receiver data if self.kernel == 'sls': # Execute operator and return wavefield and receiver data # With Memory variable summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, r=r, b=b, vp=vp, qp=qp, dt=kwargs.pop('dt', self.dt), **kwargs) else: summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, vp=vp, b=b, qp=qp, dt=kwargs.pop('dt', self.dt), **kwargs) return srca, pa, va, summary
42.142857
89
0.575453
988
8,555
4.911943
0.205466
0.038945
0.029466
0.031321
0.515763
0.450237
0.409437
0.39048
0.366577
0.334844
0
0.004998
0.345178
8,555
202
90
42.351485
0.8613
0.396844
0
0.4125
0
0
0.007155
0
0
0
0
0
0
1
0.075
false
0
0.05
0.0125
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a2f3c798c33272d514bf543343fc4f88d9112f6
1,483
py
Python
StaticProcess/apriori.py
NIL-zhuang/NJU-Data-Integration
78315d33cda6b69dd16a4704fa8e0dfc6fc359b6
[ "MIT" ]
null
null
null
StaticProcess/apriori.py
NIL-zhuang/NJU-Data-Integration
78315d33cda6b69dd16a4704fa8e0dfc6fc359b6
[ "MIT" ]
null
null
null
StaticProcess/apriori.py
NIL-zhuang/NJU-Data-Integration
78315d33cda6b69dd16a4704fa8e0dfc6fc359b6
[ "MIT" ]
null
null
null
import pandas as pd import os from tqdm import tqdm from collections import defaultdict from mlxtend.preprocessing import TransactionEncoder from mlxtend.frequent_patterns import apriori dataPath = "data/static" itemSetList = [] def loadDataSet(): with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f: for line in f.readlines(): line = line.replace('\n', '') cates = line.split(' ') itemSetList.append(list(map(int, cates))) def myApriori(): te = TransactionEncoder() te_ary = te.fit(itemSetList).transform(itemSetList) df = pd.DataFrame(te_ary, columns=te.columns_) return df def dataInit(): if os.path.exists(os.path.join(dataPath, "aprioriData.csv")): return df = pd.read_csv("data/static/static.csv") user_category = defaultdict(set) for idx, row in tqdm(df.iterrows(), total=df.shape[0], desc="category data generate"): user_category[row['USER_ID']].add(row['CATEGORY_ID']) with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f: for k, v in tqdm(user_category.items()): f.write(' '.join(sorted(list(map(str, v))))+'\n') if __name__ == '__main__': dataInit() loadDataSet() df = myApriori() frequent_itemsets = apriori(df, min_support=0.0035, use_colnames=True) frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x)) print(frequent_itemsets[(frequent_itemsets['length'] >= 2)])
32.23913
90
0.666891
194
1,483
4.969072
0.463918
0.082988
0.03112
0.056017
0.116183
0.116183
0.082988
0.082988
0
0
0
0.005795
0.185435
1,483
45
91
32.955556
0.792219
0
0
0
0
0
0.104518
0.014835
0
0
0
0
0
1
0.083333
false
0
0.166667
0
0.305556
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3098e50d11c10c71215d547da0dfbd833ce050
1,205
py
Python
tonclient/test/helpers.py
move-ton/ton-client-py
a9393a0e03b5da9bf5369a44c6873a3e720af229
[ "Apache-2.0" ]
28
2020-10-29T06:57:32.000Z
2022-03-20T12:26:14.000Z
tonclient/test/helpers.py
move-ton/ton-client-py
a9393a0e03b5da9bf5369a44c6873a3e720af229
[ "Apache-2.0" ]
1
2021-03-30T18:18:17.000Z
2021-04-04T15:35:10.000Z
tonclient/test/helpers.py
move-ton/ton-client-py
a9393a0e03b5da9bf5369a44c6873a3e720af229
[ "Apache-2.0" ]
8
2020-10-28T20:11:52.000Z
2022-01-12T12:28:02.000Z
import os from tonclient.client import TonClient from tonclient.types import Abi, CallSet, Signer, ClientConfig, \ ParamsOfEncodeMessage, ParamsOfProcessMessage BASE_DIR = os.path.dirname(__file__) SAMPLES_DIR = os.path.join(BASE_DIR, 'samples') GIVER_ADDRESS = '0:f5c2510bfe407363cb1db6b9d7bc1184a05f8b343aeaa828189c580e8569ee23' client_config = ClientConfig() client_config.network.endpoints = ['https://tonos.freeton.surf'] async_core_client = TonClient(config=client_config) sync_core_client = TonClient(config=client_config, is_core_async=False) def send_grams(address: str): giver_abi = Abi.from_path( path=os.path.join(SAMPLES_DIR, 'Giver.abi.json')) call_set = CallSet( function_name='grant', input={'dest': address}) encode_params = ParamsOfEncodeMessage( abi=giver_abi, signer=Signer.NoSigner(), address=GIVER_ADDRESS, call_set=call_set) process_params = ParamsOfProcessMessage( message_encode_params=encode_params, send_events=False) async_core_client.processing.process_message(params=process_params) def tonos_punch(): send_grams( address='0:b5e9240fc2d2f1ff8cbb1d1dee7fb7cae155e5f6320e585fcc685698994a19a5')
36.515152
85
0.778423
136
1,205
6.602941
0.411765
0.053452
0.020045
0.055679
0.082405
0.082405
0
0
0
0
0
0.074144
0.126971
1,205
32
86
37.65625
0.779468
0
0
0
0
0
0.156017
0.109544
0
0
0
0
0
1
0.08
false
0
0.12
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3176dcac3313f88ab52ef3d929182aaaba205a
12,423
py
Python
mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
jstzwjr/mmdetection
1c2878eb4f4da2978dcd9a05f9d0247726680213
[ "Apache-2.0" ]
1
2020-09-21T12:13:48.000Z
2020-09-21T12:13:48.000Z
mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
xiaojianying/mmdetection
a10d24d686e8714f42a9022da89124d04c0389ad
[ "Apache-2.0" ]
null
null
null
mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
xiaojianying/mmdetection
a10d24d686e8714f42a9022da89124d04c0389ad
[ "Apache-2.0" ]
null
null
null
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, build_upsample_layer from mmcv.ops import Conv2d from mmcv.ops.carafe import CARAFEPack from mmcv.runner import auto_fp16, force_fp32 from torch.nn.modules.utils import _pair from mmdet.core import mask_target from mmdet.models.builder import HEADS, build_loss BYTES_PER_FLOAT = 4 # TODO: This memory limit may be too much or too little. It would be better to # determine it based on available resources. GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit @HEADS.register_module() class FCNMaskHead(nn.Module): def __init__(self, num_convs=4, roi_feat_size=14, in_channels=256, conv_kernel_size=3, conv_out_channels=256, num_classes=80, class_agnostic=False, upsample_cfg=dict(type='deconv', scale_factor=2), conv_cfg=None, norm_cfg=None, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)): super(FCNMaskHead, self).__init__() self.upsample_cfg = upsample_cfg.copy() if self.upsample_cfg['type'] not in [ None, 'deconv', 'nearest', 'bilinear', 'carafe' ]: raise ValueError( f'Invalid upsample method {self.upsample_cfg["type"]}, ' 'accepted methods are "deconv", "nearest", "bilinear", ' '"carafe"') self.num_convs = num_convs # WARN: roi_feat_size is reserved and not used self.roi_feat_size = _pair(roi_feat_size) self.in_channels = in_channels self.conv_kernel_size = conv_kernel_size self.conv_out_channels = conv_out_channels self.upsample_method = self.upsample_cfg.get('type') self.scale_factor = self.upsample_cfg.pop('scale_factor', None) self.num_classes = num_classes self.class_agnostic = class_agnostic self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self.loss_mask = build_loss(loss_mask) self.convs = nn.ModuleList() for i in range(self.num_convs): in_channels = ( self.in_channels if i == 0 else self.conv_out_channels) padding = (self.conv_kernel_size - 1) // 2 self.convs.append( ConvModule( in_channels, self.conv_out_channels, self.conv_kernel_size, padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg)) upsample_in_channels = ( self.conv_out_channels if self.num_convs > 0 else in_channels) upsample_cfg_ = self.upsample_cfg.copy() if self.upsample_method is None: self.upsample = None elif self.upsample_method == 'deconv': upsample_cfg_.update( in_channels=upsample_in_channels, out_channels=self.conv_out_channels, kernel_size=self.scale_factor, stride=self.scale_factor) self.upsample = build_upsample_layer(upsample_cfg_) elif self.upsample_method == 'carafe': upsample_cfg_.update( channels=upsample_in_channels, scale_factor=self.scale_factor) self.upsample = build_upsample_layer(upsample_cfg_) else: # suppress warnings align_corners = (None if self.upsample_method == 'nearest' else False) upsample_cfg_.update( scale_factor=self.scale_factor, mode=self.upsample_method, align_corners=align_corners) self.upsample = build_upsample_layer(upsample_cfg_) out_channels = 1 if self.class_agnostic else self.num_classes logits_in_channel = ( self.conv_out_channels if self.upsample_method == 'deconv' else upsample_in_channels) self.conv_logits = Conv2d(logits_in_channel, out_channels, 1) self.relu = nn.ReLU(inplace=True) self.debug_imgs = None def init_weights(self): for m in [self.upsample, self.conv_logits]: if m is None: continue elif isinstance(m, CARAFEPack): m.init_weights() else: nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu') nn.init.constant_(m.bias, 0) @auto_fp16() def forward(self, x): for conv in self.convs: x = conv(x) if self.upsample is not None: x = self.upsample(x) if self.upsample_method == 'deconv': x = self.relu(x) mask_pred = self.conv_logits(x) return mask_pred def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, gt_masks, rcnn_train_cfg) return mask_targets @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, mask_targets, labels): loss = dict() if mask_pred.size(0) == 0: loss_mask = mask_pred.sum() * 0 else: if self.class_agnostic: loss_mask = self.loss_mask(mask_pred, mask_targets, torch.zeros_like(labels)) else: loss_mask = self.loss_mask(mask_pred, mask_targets, labels) loss['loss_mask'] = loss_mask return loss def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale): """Get segmentation masks from mask_pred and bboxes. Args: mask_pred (Tensor or ndarray): shape (n, #class, h, w). For single-scale testing, mask_pred is the direct output of model, whose type is Tensor, while for multi-scale testing, it will be converted to numpy array outside of this method. det_bboxes (Tensor): shape (n, 4/5) det_labels (Tensor): shape (n, ) img_shape (Tensor): shape (3, ) rcnn_test_cfg (dict): rcnn testing config ori_shape: original image size Returns: list[list]: encoded masks """ if isinstance(mask_pred, torch.Tensor): mask_pred = mask_pred.sigmoid() else: mask_pred = det_bboxes.new_tensor(mask_pred) device = mask_pred.device cls_segms = [[] for _ in range(self.num_classes) ] # BG is not included in num_classes bboxes = det_bboxes[:, :4] labels = det_labels if rescale: img_h, img_w = ori_shape[:2] else: if isinstance(scale_factor, float): img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32) img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32) else: w_scale, h_scale = scale_factor[0], scale_factor[1] img_h = np.round(ori_shape[0] * h_scale.item()).astype( np.int32) img_w = np.round(ori_shape[1] * w_scale.item()).astype( np.int32) scale_factor = 1.0 if not isinstance(scale_factor, (float, torch.Tensor)): scale_factor = bboxes.new_tensor(scale_factor) bboxes = bboxes / scale_factor N = len(mask_pred) # The actual implementation split the input into chunks, # and paste them chunk by chunk. if device.type == 'cpu': # CPU is most efficient when they are pasted one by one with # skip_empty=True, so that it performs minimal number of # operations. num_chunks = N else: # GPU benefits from parallelism for larger chunks, # but may have memory issue num_chunks = int( np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) assert (num_chunks <= N), 'Default GPU_MEM_LIMIT is too small; try increasing it' chunks = torch.chunk(torch.arange(N, device=device), num_chunks) threshold = rcnn_test_cfg.mask_thr_binary im_mask = torch.zeros( N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8) if not self.class_agnostic: mask_pred = mask_pred[range(N), labels][:, None] for inds in chunks: masks_chunk, spatial_inds = _do_paste_mask( mask_pred[inds], bboxes[inds], img_h, img_w, skip_empty=device.type == 'cpu') if threshold >= 0: masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) else: # for visualization and debugging masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) im_mask[(inds, ) + spatial_inds] = masks_chunk for i in range(N): cls_segms[labels[i]].append(im_mask[i].cpu().numpy()) return cls_segms def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): """Paste instance masks acoording to boxes. This implementation is modified from https://github.com/facebookresearch/detectron2/ Args: masks (Tensor): N, 1, H, W boxes (Tensor): N, 4 img_h (int): Height of the image to be pasted. img_w (int): Width of the image to be pasted. skip_empty (bool): Only paste masks within the region that tightly bound all boxes, and returns the results this region only. An important optimization for CPU. Returns: tuple: (Tensor, tuple). The first item is mask tensor, the second one is the slice object. If skip_empty == False, the whole image will be pasted. It will return a mask of shape (N, img_h, img_w) and an empty tuple. If skip_empty == True, only area around the mask will be pasted. A mask of shape (N, h', w') and its start and end coordinates in the original image will be returned. """ # On GPU, paste all masks together (up to chunk size) # by using the entire image to sample the masks # Compared to pasting them one by one, # this has more operations but is faster on COCO-scale dataset. device = masks.device if skip_empty: x0_int, y0_int = torch.clamp( boxes.min(dim=0).values.floor()[:2] - 1, min=0).to(dtype=torch.int32) x1_int = torch.clamp( boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) y1_int = torch.clamp( boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) else: x0_int, y0_int = 0, 0 x1_int, y1_int = img_w, img_h x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 N = masks.shape[0] img_y = torch.arange( y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 img_x = torch.arange( x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 img_y = (img_y - y0) / (y1 - y0) * 2 - 1 img_x = (img_x - x0) / (x1 - x0) * 2 - 1 # img_x, img_y have shapes (N, w), (N, h) if torch.isinf(img_x).any(): inds = torch.where(torch.isinf(img_x)) img_x[inds] = 0 if torch.isinf(img_y).any(): inds = torch.where(torch.isinf(img_y)) img_y[inds] = 0 gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) grid = torch.stack([gx, gy], dim=3) img_masks = F.grid_sample( masks.to(dtype=torch.float32), grid, align_corners=False) if skip_empty: return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) else: return img_masks[:, 0], ()
39.189274
79
0.583353
1,634
12,423
4.205018
0.209914
0.025615
0.017465
0.016591
0.190656
0.11483
0.07699
0.056469
0.056469
0.034347
0
0.017174
0.325042
12,423
316
80
39.313291
0.802266
0.182484
0
0.108225
0
0
0.030471
0.002816
0
0
0
0.003165
0.004329
1
0.030303
false
0
0.047619
0
0.108225
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3315016cdca312326db456e1d5eabcd1f0d049
14,798
py
Python
examples/machine_reading_comprehension/DuReader-robust/run_du.py
wzzju/PaddleNLP
1757a4fc2a3cd5a45f75c6482746777752b414d8
[ "Apache-2.0" ]
3
2021-09-06T11:27:49.000Z
2021-11-09T08:19:00.000Z
examples/machine_reading_comprehension/DuReader-robust/run_du.py
svs1984/PaddleNLP
9eb9e23b01d044706c789158ac6cf0d365aea848
[ "Apache-2.0" ]
null
null
null
examples/machine_reading_comprehension/DuReader-robust/run_du.py
svs1984/PaddleNLP
9eb9e23b01d044706c789158ac6cf0d365aea848
[ "Apache-2.0" ]
4
2021-08-23T07:46:06.000Z
2021-09-23T08:37:03.000Z
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import time import json import math from functools import partial import numpy as np import paddle from paddle.io import DataLoader from args import parse_args import paddlenlp as ppnlp from paddlenlp.data import Pad, Stack, Tuple, Dict from paddlenlp.transformers import BertForQuestionAnswering, BertTokenizer from paddlenlp.transformers import ErnieForQuestionAnswering, ErnieTokenizer from paddlenlp.transformers import ErnieGramForQuestionAnswering, ErnieGramTokenizer from paddlenlp.transformers import RobertaForQuestionAnswering, RobertaTokenizer from paddlenlp.transformers import LinearDecayWithWarmup from paddlenlp.metrics.squad import squad_evaluate, compute_prediction from paddlenlp.datasets import load_dataset MODEL_CLASSES = { "bert": (BertForQuestionAnswering, BertTokenizer), "ernie": (ErnieForQuestionAnswering, ErnieTokenizer), "ernie_gram": (ErnieGramForQuestionAnswering, ErnieGramTokenizer), "roberta": (RobertaForQuestionAnswering, RobertaTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) paddle.seed(args.seed) @paddle.no_grad() def evaluate(model, data_loader, args): model.eval() all_start_logits = [] all_end_logits = [] tic_eval = time.time() for batch in data_loader: input_ids, token_type_ids = batch start_logits_tensor, end_logits_tensor = model(input_ids, token_type_ids) for idx in range(start_logits_tensor.shape[0]): if len(all_start_logits) % 1000 == 0 and len(all_start_logits): print("Processing example: %d" % len(all_start_logits)) print('time per 1000:', time.time() - tic_eval) tic_eval = time.time() all_start_logits.append(start_logits_tensor.numpy()[idx]) all_end_logits.append(end_logits_tensor.numpy()[idx]) all_predictions, _, _ = compute_prediction( data_loader.dataset.data, data_loader.dataset.new_data, (all_start_logits, all_end_logits), False, args.n_best_size, args.max_answer_length) # Can also write all_nbest_json and scores_diff_json files if needed with open('prediction.json', "w", encoding='utf-8') as writer: writer.write( json.dumps( all_predictions, ensure_ascii=False, indent=4) + "\n") squad_evaluate( examples=data_loader.dataset.data, preds=all_predictions, is_whitespace_splited=False) model.train() class CrossEntropyLossForSQuAD(paddle.nn.Layer): def __init__(self): super(CrossEntropyLossForSQuAD, self).__init__() def forward(self, y, label): start_logits, end_logits = y start_position, end_position = label start_position = paddle.unsqueeze(start_position, axis=-1) end_position = paddle.unsqueeze(end_position, axis=-1) start_loss = paddle.nn.functional.cross_entropy( input=start_logits, label=start_position) end_loss = paddle.nn.functional.cross_entropy( input=end_logits, label=end_position) loss = (start_loss + end_loss) / 2 return loss def run(args): paddle.set_device(args.device) if paddle.distributed.get_world_size() > 1: paddle.distributed.init_parallel_env() rank = paddle.distributed.get_rank() task_name = args.task_name.lower() args.model_type = args.model_type.lower() model_class, tokenizer_class = MODEL_CLASSES[args.model_type] tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path) set_seed(args) if rank == 0: if os.path.exists(args.model_name_or_path): print("init checkpoint from %s" % args.model_name_or_path) model = model_class.from_pretrained(args.model_name_or_path) if paddle.distributed.get_world_size() > 1: model = paddle.DataParallel(model) def prepare_train_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. # NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is # that HugggingFace uses ArrowTable as basic data structure, while we use list of dictionary instead. contexts = [examples[i]['context'] for i in range(len(examples))] questions = [examples[i]['question'] for i in range(len(examples))] tokenized_examples = tokenizer( questions, contexts, stride=args.doc_stride, max_seq_len=args.max_seq_length) # Let's label those examples! for i, tokenized_example in enumerate(tokenized_examples): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_example["input_ids"] cls_index = input_ids.index(tokenizer.cls_token_id) # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offsets = tokenized_example['offset_mapping'] # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_example['token_type_ids'] # One example can give several spans, this is the index of the example containing this span of text. sample_index = tokenized_example['overflow_to_sample'] answers = examples[sample_index]['answers'] answer_starts = examples[sample_index]['answer_starts'] # Start/end character index of the answer in the text. start_char = answer_starts[0] end_char = start_char + len(answers[0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != 1: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != 1: token_end_index -= 1 # Minus one more to reach actual text token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples[i]["start_positions"] = cls_index tokenized_examples[i]["end_positions"] = cls_index else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[ token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples[i]["start_positions"] = token_start_index - 1 while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples[i]["end_positions"] = token_end_index + 1 return tokenized_examples if args.do_train: if args.train_file: train_ds = load_dataset(task_name, data_files=args.train_file) else: train_ds = load_dataset(task_name, splits='train') train_ds.map(prepare_train_features, batched=True) train_batch_sampler = paddle.io.DistributedBatchSampler( train_ds, batch_size=args.batch_size, shuffle=True) train_batchify_fn = lambda samples, fn=Dict({ "input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id), "token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id), "start_positions": Stack(dtype="int64"), "end_positions": Stack(dtype="int64") }): fn(samples) train_data_loader = DataLoader( dataset=train_ds, batch_sampler=train_batch_sampler, collate_fn=train_batchify_fn, return_list=True) num_training_steps = args.max_steps if args.max_steps > 0 else len( train_data_loader) * args.num_train_epochs num_train_epochs = math.ceil(num_training_steps / len(train_data_loader)) lr_scheduler = LinearDecayWithWarmup( args.learning_rate, num_training_steps, args.warmup_proportion) # Generate parameter names needed to perform weight decay. # All bias and LayerNorm parameters are excluded. decay_params = [ p.name for n, p in model.named_parameters() if not any(nd in n for nd in ["bias", "norm"]) ] optimizer = paddle.optimizer.AdamW( learning_rate=lr_scheduler, epsilon=args.adam_epsilon, parameters=model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params) criterion = CrossEntropyLossForSQuAD() global_step = 0 tic_train = time.time() for epoch in range(num_train_epochs): for step, batch in enumerate(train_data_loader): global_step += 1 input_ids, token_type_ids, start_positions, end_positions = batch logits = model( input_ids=input_ids, token_type_ids=token_type_ids) loss = criterion(logits, (start_positions, end_positions)) if global_step % args.logging_steps == 0: print( "global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s" % (global_step, epoch + 1, step + 1, loss, args.logging_steps / (time.time() - tic_train))) tic_train = time.time() loss.backward() optimizer.step() lr_scheduler.step() optimizer.clear_grad() if global_step % args.save_steps == 0 or global_step == num_training_steps: if rank == 0: output_dir = os.path.join(args.output_dir, "model_%d" % global_step) if not os.path.exists(output_dir): os.makedirs(output_dir) # need better way to get inner model of DataParallel model_to_save = model._layers if isinstance( model, paddle.DataParallel) else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) print('Saving checkpoint to:', output_dir) if global_step == num_training_steps: break def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. # NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is # that HugggingFace uses ArrowTable as basic data structure, while we use list of dictionary instead. contexts = [examples[i]['context'] for i in range(len(examples))] questions = [examples[i]['question'] for i in range(len(examples))] tokenized_examples = tokenizer( questions, contexts, stride=args.doc_stride, max_seq_len=args.max_seq_length) # For validation, there is no need to compute start and end positions for i, tokenized_example in enumerate(tokenized_examples): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_example['token_type_ids'] # One example can give several spans, this is the index of the example containing this span of text. sample_index = tokenized_example['overflow_to_sample'] tokenized_examples[i]["example_id"] = examples[sample_index]['id'] # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples[i]["offset_mapping"] = [ (o if sequence_ids[k] == 1 else None) for k, o in enumerate(tokenized_example["offset_mapping"]) ] return tokenized_examples if args.do_predict and rank == 0: if args.predict_file: dev_ds = load_dataset(task_name, data_files=args.predict_file) else: dev_ds = load_dataset(task_name, splits='dev') dev_ds.map(prepare_validation_features, batched=True) dev_batch_sampler = paddle.io.BatchSampler( dev_ds, batch_size=args.batch_size, shuffle=False) dev_batchify_fn = lambda samples, fn=Dict({ "input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id), "token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id) }): fn(samples) dev_data_loader = DataLoader( dataset=dev_ds, batch_sampler=dev_batch_sampler, collate_fn=dev_batchify_fn, return_list=True) evaluate(model, dev_data_loader, args) if __name__ == "__main__": args = parse_args() run(args)
43.651917
120
0.648331
1,844
14,798
4.9718
0.221258
0.00709
0.01178
0.010689
0.348604
0.300175
0.280105
0.244764
0.218586
0.218586
0
0.006164
0.276456
14,798
338
121
43.781065
0.850098
0.213137
0
0.188841
0
0.004292
0.046909
0
0
0
0
0
0
1
0.030043
false
0
0.081545
0
0.128755
0.021459
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3543c746387ad12029585c2e306e26ec984737
4,324
py
Python
Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py
quangnguyendang/Reinforcement_Learning
2551ce95068561c553500838ee6b976f001ba667
[ "MIT" ]
null
null
null
Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py
quangnguyendang/Reinforcement_Learning
2551ce95068561c553500838ee6b976f001ba667
[ "MIT" ]
null
null
null
Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py
quangnguyendang/Reinforcement_Learning
2551ce95068561c553500838ee6b976f001ba667
[ "MIT" ]
null
null
null
# Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0 import gym import tensorflow as tf import numpy as np import matplotlib.pyplot as plt env = gym.make('FrozenLake-v0') # NEURAL NETWORK IMPLEMENTATION tf.reset_default_graph() # Feature vector for current state representation input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32) # tf.Variable(<initial-value>, name=<optional-name>) # tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None) # Weighting W vector in range 0 - 0.01 (like the way Andrew Ng did with *0.01 W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01)) # Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state Qout = tf.matmul(input1, W) # Greedy action at a state predict = tf.argmax(Qout, axis=1) # Feature vector for next state representation nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32) # Entropy loss loss = tf.reduce_sum(tf.square(Qout - nextQ)) trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1) updateModel = trainer.minimize(loss) # TRAIN THE NETWORK init = tf.global_variables_initializer() # Set learning parameters y = 0.99 e = 0.1 number_episodes = 2000 # List to store total rewards and steps per episode jList = [] rList = [] with tf.Session() as sess: sess.run(init) for i in range(number_episodes): print("Episode #{} is running!".format(i)) # First state s = env.reset() rAll = 0 d = False j = 0 # Q network while j < 200: # or While not d: j += 1 # Choose action by epsilon (e) greedy # print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1]) # s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] # s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] # Identity [s:s+1] is a one-hot vector # Therefore W is the actual Q value a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]}) if np.random.rand(1) < e: a[0] = env.action_space.sample() s1, r, d, _ = env.step(a[0]) # Obtain next state Q value by feeding the new state throughout the network Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]}) maxQ1 = np.max(Q1) targetQ = allQ targetQ[0, a[0]] = r + y * maxQ1 # Train our network using target and predicted Q values _, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ}) rAll += r s = s1 if d: e = 1./((i/50) + 10) break jList.append(j) rList.append(rAll) env.close() plt.figure() plt.plot(rList, label="Return - Q Learning") plt.show() plt.figure() plt.plot(jList, label="Steps - Q Learning") plt.show() # ------------------------------------------------------------------------- # TABULAR IMPLEMENTATION # # # Set learning parameters # lr = 0.8 # y = 0.95 # number_episodes = 20000 # # # Initial table with all zeros # Q = np.zeros([env.observation_space.n, env.action_space.n]) # # # List of reward and steps per episode # rList = [] # for i in range (number_episodes): # print("Episode #{} is running!".format(i)) # s = env.reset() # rAll = 0 # d = False # j = 0 # while j < 99: # j += 1 # # Choose an action by greedily (with noise) picking from Q table # # Because of the noise, it is epsilon-greedy with epsilon decreasing over time # a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1))) # s1, r, d, _ = env.step(a) # # env.render() # # # Update Q table with new knowledge # Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a]) # rAll += r # s = s1 # if d: # break # rList.append(rAll)
30.666667
155
0.586725
649
4,324
3.861325
0.311248
0.023144
0.029928
0.036712
0.2498
0.206704
0.171588
0.171588
0.143655
0.112929
0
0.042924
0.256475
4,324
140
156
30.885714
0.736547
0.514107
0
0.075472
0
0
0.035961
0
0
0
0
0
0
1
0
false
0
0.075472
0
0.075472
0.018868
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3651a34d3b1893e6f70ebe64b9db39d329cd63
8,496
py
Python
testing/cross_language/util/supported_key_types.py
chanced/tink
9cc3a01ac0165b033ed51dc9d0812a98b4b6e305
[ "Apache-2.0" ]
null
null
null
testing/cross_language/util/supported_key_types.py
chanced/tink
9cc3a01ac0165b033ed51dc9d0812a98b4b6e305
[ "Apache-2.0" ]
null
null
null
testing/cross_language/util/supported_key_types.py
chanced/tink
9cc3a01ac0165b033ed51dc9d0812a98b4b6e305
[ "Apache-2.0" ]
1
2022-01-02T20:54:04.000Z
2022-01-02T20:54:04.000Z
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """All KeyTypes and which languages support them.""" # Placeholder for import for type annotations from tink import aead from tink import daead from tink import hybrid from tink import mac from tink import prf from tink import signature from tink import streaming_aead from tink.proto import tink_pb2 # All languages supported by cross-language tests. ALL_LANGUAGES = ['cc', 'java', 'go', 'python'] # All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.') AEAD_KEY_TYPES = [ 'AesEaxKey', 'AesGcmKey', 'AesGcmSivKey', 'AesCtrHmacAeadKey', 'ChaCha20Poly1305Key', 'XChaCha20Poly1305Key', ] DAEAD_KEY_TYPES = ['AesSivKey'] STREAMING_AEAD_KEY_TYPES = [ 'AesCtrHmacStreamingKey', 'AesGcmHkdfStreamingKey', ] HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey'] MAC_KEY_TYPES = [ 'AesCmacKey', 'HmacKey', ] SIGNATURE_KEY_TYPES = [ 'EcdsaPrivateKey', 'Ed25519PrivateKey', 'RsaSsaPkcs1PrivateKey', 'RsaSsaPssPrivateKey', ] PRF_KEY_TYPES = [ 'AesCmacPrfKey', 'HmacPrfKey', 'HkdfPrfKey', ] ALL_KEY_TYPES = ( AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES + HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES + PRF_KEY_TYPES) # All languages that are supported by a KeyType SUPPORTED_LANGUAGES = { 'AesEaxKey': ['cc', 'java', 'python'], 'AesGcmKey': ['cc', 'java', 'go', 'python'], 'AesGcmSivKey': ['cc', 'python'], 'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'], 'ChaCha20Poly1305Key': ['java', 'go'], 'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'], 'AesSivKey': ['cc', 'java', 'go', 'python'], 'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'], 'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'], 'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'], 'AesCmacKey': ['cc', 'java', 'go', 'python'], 'HmacKey': ['cc', 'java', 'go', 'python'], 'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'], 'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'], 'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'], 'RsaSsaPssPrivateKey': ['cc', 'java', 'python'], 'AesCmacPrfKey': ['cc', 'java', 'go', 'python'], 'HmacPrfKey': ['cc', 'java', 'go', 'python'], 'HkdfPrfKey': ['cc', 'java', 'go', 'python'], } KEY_TYPE_FROM_URL = { 'type.googleapis.com/google.crypto.tink.' + key_type: key_type for key_type in ALL_KEY_TYPES} # For each KeyType, a list of all KeyTemplate Names that must be supported. KEY_TEMPLATE_NAMES = { 'AesEaxKey': ['AES128_EAX', 'AES256_EAX'], 'AesGcmKey': ['AES128_GCM', 'AES256_GCM'], 'AesGcmSivKey': ['AES128_GCM_SIV', 'AES256_GCM_SIV'], 'AesCtrHmacAeadKey': ['AES128_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256'], 'ChaCha20Poly1305Key': ['CHACHA20_POLY1305'], 'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305'], 'AesSivKey': ['AES256_SIV'], 'AesCtrHmacStreamingKey': [ 'AES128_CTR_HMAC_SHA256_4KB', 'AES256_CTR_HMAC_SHA256_4KB', ], 'AesGcmHkdfStreamingKey': [ 'AES128_GCM_HKDF_4KB', 'AES256_GCM_HKDF_4KB', 'AES256_GCM_HKDF_1MB', ], 'EciesAeadHkdfPrivateKey': [ 'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM', 'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256' ], 'AesCmacKey': ['AES_CMAC'], 'HmacKey': [ 'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG', 'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG' ], 'EcdsaPrivateKey': [ 'ECDSA_P256', 'ECDSA_P384', 'ECDSA_P384_SHA384', 'ECDSA_P521', 'ECDSA_P256_IEEE_P1363', 'ECDSA_P384_IEEE_P1363', 'ECDSA_P384_SHA384_IEEE_P1363', 'ECDSA_P521_IEEE_P1363' ], 'Ed25519PrivateKey': ['ED25519'], 'RsaSsaPkcs1PrivateKey': [ 'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4' ], 'RsaSsaPssPrivateKey': [ 'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4', 'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4' ], 'AesCmacPrfKey': ['AES_CMAC_PRF'], 'HmacPrfKey': ['HMAC_PRF_SHA256', 'HMAC_PRF_SHA512'], 'HkdfPrfKey': ['HKDF_PRF_SHA256'], } # KeyTemplate (as Protobuf) for each KeyTemplate name. KEY_TEMPLATE = { 'AES128_EAX': aead.aead_key_templates.AES128_EAX, 'AES256_EAX': aead.aead_key_templates.AES256_EAX, 'AES128_GCM': aead.aead_key_templates.AES128_GCM, 'AES256_GCM': aead.aead_key_templates.AES256_GCM, 'AES128_GCM_SIV': aead.aead_key_templates.AES128_GCM_SIV, 'AES256_GCM_SIV': aead.aead_key_templates.AES256_GCM_SIV, 'AES128_CTR_HMAC_SHA256': aead.aead_key_templates.AES128_CTR_HMAC_SHA256, 'AES256_CTR_HMAC_SHA256': aead.aead_key_templates.AES256_CTR_HMAC_SHA256, 'CHACHA20_POLY1305': tink_pb2.KeyTemplate( type_url=('type.googleapis.com/google.crypto.tink.' + 'ChaCha20Poly1305Key'), output_prefix_type=tink_pb2.TINK), 'XCHACHA20_POLY1305': aead.aead_key_templates.XCHACHA20_POLY1305, 'AES256_SIV': daead.deterministic_aead_key_templates.AES256_SIV, 'AES128_CTR_HMAC_SHA256_4KB': streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB, 'AES256_CTR_HMAC_SHA256_4KB': streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB, 'AES128_GCM_HKDF_4KB': streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB, 'AES256_GCM_HKDF_4KB': streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB, 'AES256_GCM_HKDF_1MB': streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB, 'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM': hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM, 'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256': hybrid.hybrid_key_templates .ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256, 'AES_CMAC': mac.mac_key_templates.AES_CMAC, 'HMAC_SHA256_128BITTAG': mac.mac_key_templates.HMAC_SHA256_128BITTAG, 'HMAC_SHA256_256BITTAG': mac.mac_key_templates.HMAC_SHA256_256BITTAG, 'HMAC_SHA512_256BITTAG': mac.mac_key_templates.HMAC_SHA512_256BITTAG, 'HMAC_SHA512_512BITTAG': mac.mac_key_templates.HMAC_SHA512_512BITTAG, 'ECDSA_P256': signature.signature_key_templates.ECDSA_P256, 'ECDSA_P384': signature.signature_key_templates.ECDSA_P384, 'ECDSA_P384_SHA384': signature.signature_key_templates.ECDSA_P384_SHA384, 'ECDSA_P521': signature.signature_key_templates.ECDSA_P521, 'ECDSA_P256_IEEE_P1363': signature.signature_key_templates.ECDSA_P256_IEEE_P1363, 'ECDSA_P384_IEEE_P1363': signature.signature_key_templates.ECDSA_P384_IEEE_P1363, 'ECDSA_P384_SHA384_IEEE_P1363': signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363, 'ECDSA_P521_IEEE_P1363': signature.signature_key_templates.ECDSA_P521_IEEE_P1363, 'ED25519': signature.signature_key_templates.ED25519, 'RSA_SSA_PKCS1_3072_SHA256_F4': signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4, 'RSA_SSA_PKCS1_4096_SHA512_F4': signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4, 'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4': signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4, 'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4': signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4, 'AES_CMAC_PRF': prf.prf_key_templates.AES_CMAC, 'HMAC_PRF_SHA256': prf.prf_key_templates.HMAC_SHA256, 'HMAC_PRF_SHA512': prf.prf_key_templates.HMAC_SHA512, 'HKDF_PRF_SHA256': prf.prf_key_templates.HKDF_SHA256, } SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = { name: SUPPORTED_LANGUAGES[KEY_TYPE_FROM_URL[template.type_url]] for name, template in KEY_TEMPLATE.items() }
37.263158
79
0.711982
1,031
8,496
5.412221
0.163919
0.083871
0.021505
0.037634
0.467921
0.431183
0.307168
0.251971
0.166667
0.109857
0
0.102619
0.173023
8,496
227
80
37.427313
0.691574
0.107345
0
0.035176
0
0
0.365115
0.174028
0
0
0
0
0
1
0
false
0
0.040201
0
0.040201
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a36ee854524bdd692e5d46cc0bfba0c999e570b
3,625
py
Python
signer.py
chapeltech/remote-signer
83d083ed7e8c7123187ba70ee3132b898e8ef02e
[ "MIT" ]
39
2018-07-08T01:01:18.000Z
2022-01-03T13:48:10.000Z
signer.py
chapeltech/remote-signer
83d083ed7e8c7123187ba70ee3132b898e8ef02e
[ "MIT" ]
4
2019-10-04T11:15:15.000Z
2022-02-03T00:17:47.000Z
signer.py
chapeltech/remote-signer
83d083ed7e8c7123187ba70ee3132b898e8ef02e
[ "MIT" ]
19
2018-09-20T11:52:25.000Z
2022-02-02T19:21:04.000Z
#!/usr/bin/env python3 ######################################################### # Written by Carl Youngblood, [email protected] # Copyright (c) 2018 Blockscale LLC # released under the MIT license ######################################################### from flask import Flask, request, Response, json, jsonify from src.remote_signer import RemoteSigner from os import path import logging logging.basicConfig(filename='./remote-signer.log', format='%(asctime)s %(message)s', level=logging.INFO) app = Flask(__name__) # sample config used for testing config = { 'hsm_username': 'resigner', 'hsm_slot': 1, 'hsm_lib': '/opt/cloudhsm/lib/libcloudhsm_pkcs11.so', 'node_addr': 'http://node.internal:8732', 'keys': { 'tz3aTaJ3d7Rh4yXpereo4yBm21xrs4bnzQvW': { 'public_key': 'p2pk67jx4rEadFpbHdiPhsKxZ4KCoczLWqsEpNarWZ7WQ1SqKMf7JsS', 'private_handle': 7, 'public_handle': 9 } } } logging.info('Opening keys.json') if path.isfile('keys.json'): logging.info('Found keys.json') with open('keys.json', 'r') as myfile: json_blob = myfile.read().replace('\n', '') logging.info('Parsed keys.json successfully as JSON') config = json.loads(json_blob) logging.info('Config contains: {}'.format(json.dumps(config, indent=2))) @app.route('/keys/<key_hash>', methods=['POST']) def sign(key_hash): response = None try: data = request.get_json(force=True) if key_hash in config['keys']: logging.info('Found key_hash {} in config'.format(key_hash)) key = config['keys'][key_hash] logging.info('Attempting to sign {}'.format(data)) rs = RemoteSigner(config, data) response = jsonify({ 'signature': rs.sign(key['private_handle']) }) logging.info('Response is {}'.format(response)) else: logging.warning("Couldn't find key {}".format(key_hash)) response = Response('Key not found', status=404) except Exception as e: data = {'error': str(e)} logging.error('Exception thrown during request: {}'.format(str(e))) response = app.response_class( response=json.dumps(data), status=500, mimetype='application/json' ) logging.info('Returning flask response {}'.format(response)) return response @app.route('/keys/<key_hash>', methods=['GET']) def get_public_key(key_hash): response = None try: if key_hash in config['keys']: key = config['keys'][key_hash] response = jsonify({ 'public_key': key['public_key'] }) logging.info('Found public key {} for key hash {}'.format(key['public_key'], key_hash)) else: logging.warning("Couldn't public key for key hash {}".format(key_hash)) response = Response('Key not found', status=404) except Exception as e: data = {'error': str(e)} logging.error('Exception thrown during request: {}'.format(str(e))) response = app.response_class( response=json.dumps(data), status=500, mimetype='application/json' ) logging.info('Returning flask response {}'.format(response)) return response @app.route('/authorized_keys', methods=['GET']) def authorized_keys(): return app.response_class( response=json.dumps({}), status=200, mimetype='application/json' ) if __name__ == '__main__': app.run(host='127.0.0.1', port=5000, debug=True)
33.564815
105
0.592552
410
3,625
5.121951
0.34878
0.05
0.035714
0.021429
0.405714
0.341905
0.285238
0.262857
0.262857
0.262857
0
0.020268
0.237793
3,625
107
106
33.878505
0.739776
0.045517
0
0.406977
0
0
0.267884
0.038911
0
0
0
0
0
1
0.034884
false
0
0.046512
0.011628
0.116279
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a37a73802d1db18a333fdc568416dbf6367829d
3,658
py
Python
unwind.py
0x1F9F1/binja-msvc
be2577c22c8d37fd1e2e211f80b1c9a920705bd2
[ "MIT" ]
9
2019-02-08T10:01:39.000Z
2021-04-29T12:27:34.000Z
unwind.py
DatBrick/binja-msvc
751ffc1450c569bad23ac67a761d0f1fbd4ca4c4
[ "MIT" ]
1
2019-07-04T20:09:57.000Z
2019-07-12T11:10:15.000Z
unwind.py
DatBrick/binja-msvc
751ffc1450c569bad23ac67a761d0f1fbd4ca4c4
[ "MIT" ]
2
2019-03-03T13:00:14.000Z
2020-05-01T05:35:04.000Z
from binaryninja import log from .utils import BinjaStruct, read_pe_header, split_bits, update_percentage # https://msdn.microsoft.com/en-us/library/ft9x1kdx.aspx RUNTIME_FUNCTION_t = BinjaStruct('<III', names = ('BeginAddress', 'EndAddress', 'UnwindData')) def read_runtime_function(view, address): runtime_function, address = RUNTIME_FUNCTION_t.read(view, address, 4) if runtime_function is not None: runtime_function['BeginAddress'] += view.start runtime_function['EndAddress'] += view.start runtime_function['UnwindData'] += view.start return runtime_function, address UNWIND_INFO_t = BinjaStruct('<BBBB', names = ('VersionAndFlags', 'SizeOfProlog', 'CountOfCodes', 'FrameRegisterAndOffset')) UNW_FLAG_NHANDLER = 0x0 UNW_FLAG_EHANDLER = 0x1 UNW_FLAG_UHANDLER = 0x2 UNW_FLAG_FHANDLER = 0x3 UNW_FLAG_CHAININFO = 0x4 def read_unwind_info(view, address): unwind_info, address = UNWIND_INFO_t.read(view, address) if unwind_info is not None: split_bits(unwind_info, 'VersionAndFlags', [ ('Version', 0, 3), ('Flags', 3, 5) ]) split_bits(unwind_info, 'FrameRegisterAndOffset', [ ('FrameRegister', 0, 4), ('FrameOffset', 4, 4) ]) if unwind_info['Version'] == 1: unwind_codes = [ ] for i in range(unwind_info['CountOfCodes']): unwind_code, address = read_unwind_code(view, address) unwind_codes.append(unwind_code) unwind_info['UnwindCodes'] = unwind_codes if unwind_info['Flags'] & UNW_FLAG_CHAININFO: unwind_info['FunctionEntry'], address = read_runtime_function(view, address) return unwind_info, address UNWIND_CODE_t = BinjaStruct('<BB', names = ('CodeOffset', 'UnwindOpAndInfo')) def read_unwind_code(view, address): unwind_code, address = UNWIND_CODE_t.read(view, address) if unwind_code is not None: split_bits(unwind_code, 'UnwindOpAndInfo', [ ('UnwindOp', 0, 4), ('OpInfo', 4, 4) ]) return unwind_code, address def parse_unwind_info(thread, view): base_address = view.start pe = read_pe_header(view) unwind_directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[3] unwind_entrys = base_address + unwind_directory.VirtualAddress unwind_entrys_end = unwind_entrys + unwind_directory.Size funcs = set() log.log_info('Exception Data @ 0x{0:X} => 0x{1:X}'.format(unwind_entrys, unwind_entrys_end)) for runtime_address in range(unwind_entrys, unwind_entrys_end, 12): if thread.cancelled: break update_percentage(thread, unwind_entrys, unwind_entrys_end, runtime_address, 'Parsing Unwind Info - Found {0} functions'.format(len(funcs))) runtime_function, _ = read_runtime_function(view, runtime_address) if runtime_function is None: continue start_address = runtime_function['BeginAddress'] if not view.is_offset_executable(start_address): continue if view.get_functions_containing(start_address): continue info_address = runtime_function['UnwindData'] unwind_info, _ = read_unwind_info(view, info_address) if unwind_info is None: continue if 'FunctionEntry' in unwind_info: continue funcs.add(start_address) if not thread.cancelled: thread.progress = 'Creating {0} Function'.format(len(funcs)) log.log_info('Found {0} functions'.format(len(funcs))) for func in funcs: view.create_user_function(func)
31
148
0.667578
429
3,658
5.410256
0.268065
0.081861
0.037915
0.029729
0.168031
0.09651
0.028436
0
0
0
0
0.012807
0.231547
3,658
117
149
31.264957
0.812878
0.014762
0
0.103896
0
0
0.12854
0.012215
0
0
0.004164
0
0
1
0.051948
false
0
0.025974
0
0.116883
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a37f39c3ffc420ffcb4173ab24c22f5ec606276
2,538
py
Python
pixloc/visualization/viz_3d.py
jmorlana/pixloc
90f7e968398252e8557b284803ee774cb8d80cd0
[ "Apache-2.0" ]
457
2021-03-17T00:39:33.000Z
2022-03-30T02:38:19.000Z
pixloc/visualization/viz_3d.py
jmorlana/pixloc
90f7e968398252e8557b284803ee774cb8d80cd0
[ "Apache-2.0" ]
31
2021-03-17T07:35:34.000Z
2022-03-31T07:07:56.000Z
pixloc/visualization/viz_3d.py
jmorlana/pixloc
90f7e968398252e8557b284803ee774cb8d80cd0
[ "Apache-2.0" ]
56
2021-03-17T05:55:09.000Z
2022-03-15T01:38:35.000Z
""" 3D visualization primitives based on Plotly. We might want to instead use a more powerful library like Open3D. Plotly however supports animations, buttons and sliders. 1) Initialize a figure with `fig = init_figure()` 2) Plot points, cameras, lines, or create a slider animation. 3) Call `fig.show()` to render the figure. """ import plotly.graph_objects as go import numpy as np from ..pixlib.geometry.utils import to_homogeneous def init_figure(height=800): """Initialize a 3D figure.""" fig = go.Figure() fig.update_layout( height=height, scene_camera=dict( eye=dict(x=0., y=-.1, z=-2), up=dict(x=0, y=-1., z=0)), scene=dict( xaxis=dict(showbackground=False), yaxis=dict(showbackground=False), aspectmode='data', dragmode='orbit'), margin=dict(l=0, r=0, b=0, t=0, pad=0)) # noqa E741 return fig def plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=2): """Plot a set of 3D points.""" x, y, z = pts.T tr = go.Scatter3d( x=x, y=y, z=z, mode='markers', marker_size=ps, marker_color=color, marker_line_width=.2) fig.add_trace(tr) def plot_camera(fig, R, t, K, color='rgb(0, 0, 255)'): """Plot a camera as a cone with camera frustum.""" x, y, z = t u, v, w = R @ -np.array([0, 0, 1]) tr = go.Cone( x=[x], y=[y], z=[z], u=[u], v=[v], w=[w], anchor='tip', showscale=False, colorscale=[[0, color], [1, color]], sizemode='absolute') fig.add_trace(tr) W, H = K[0, 2]*2, K[1, 2]*2 corners = np.array([[0, 0], [W, 0], [W, H], [0, H], [0, 0]]) corners = to_homogeneous(corners) @ np.linalg.inv(K).T corners = (corners/2) @ R.T + t x, y, z = corners.T tr = go.Scatter3d( x=x, y=y, z=z, line=dict(color='rgba(0, 0, 0, .5)'), marker=dict(size=0.0001), showlegend=False) fig.add_trace(tr) def create_slider_animation(fig, traces): """Create a slider that animates a list of traces (e.g. 3D points).""" slider = {'steps': []} frames = [] fig.add_trace(traces[0]) idx = len(fig.data) - 1 for i, tr in enumerate(traces): frames.append(go.Frame(name=str(i), traces=[idx], data=[tr])) step = {"args": [ [str(i)], {"frame": {"redraw": True}, "mode": "immediate"}], "label": i, "method": "animate"} slider['steps'].append(step) fig.frames = tuple(frames) fig.layout.sliders = (slider,)
32.126582
74
0.566982
392
2,538
3.622449
0.387755
0.009859
0.030986
0.008451
0.067606
0.04507
0.028169
0.028169
0.028169
0.028169
0
0.036489
0.254925
2,538
78
75
32.538462
0.714437
0.194247
0
0.092593
0
0
0.065444
0
0
0
0
0
0
1
0.074074
false
0
0.055556
0
0.148148
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a37f74b88dcc7ed94cf7a22b08f15fb01357b23
1,882
py
Python
day04/c.py
Net-Mist/advent_of_code2021
124d773356bee2794294800de7673d5fac24db0a
[ "MIT" ]
1
2022-01-03T09:21:33.000Z
2022-01-03T09:21:33.000Z
day04/c.py
Net-Mist/advent_of_code2021
124d773356bee2794294800de7673d5fac24db0a
[ "MIT" ]
null
null
null
day04/c.py
Net-Mist/advent_of_code2021
124d773356bee2794294800de7673d5fac24db0a
[ "MIT" ]
null
null
null
import numpy as np GRID_SIZE = 5 def read_bingo_grid(lines: list[str]) -> list[list[int]]: return [[int(n) for n in line.split()] for line in lines] def bingo_step(grids: np.ndarray, checked_grids: np.ndarray, number: int) -> None: checked_grids[np.where(grids == number)] = True def check_victory(check_grids: np.ndarray) -> set[int]: """return empty set if no victory, else set of id of the wining grids""" return set(np.where(check_grids.sum(axis=1).max(axis=1) == 5)[0]).union( np.where(check_grids.sum(axis=2).max(axis=1) == 5)[0] ) def sum_grid(grid: np.ndarray, checked_grid: np.ndarray) -> int: grid[checked_grid] = 0 return grid.sum() def main() -> None: with open("input.txt") as f: lines = f.readlines() random_numbers = [int(n) for n in lines[0].split(",")] grids = np.array([read_bingo_grid(lines[i : i + GRID_SIZE]) for i in range(2, len(lines), 1 + GRID_SIZE)]) checked_grids = np.array([[[False for _ in range(GRID_SIZE)] for _ in range(GRID_SIZE)] for _ in range(len(grids))]) win = False i = 0 q1_done = False while not win: bingo_step(grids, checked_grids, random_numbers[i]) winning_set = check_victory(checked_grids) if len(winning_set) == 1 and not q1_done: index = list(winning_set)[0] s = sum_grid(grids[index], checked_grids[index]) print("part1:", s * random_numbers[i]) q1_done = True if len(grids) == len(winning_set) + 1: index_last_to_win = list(set(range(len(grids))).difference(winning_set))[0] if len(grids) == len(winning_set): s = sum_grid(grids[index_last_to_win], checked_grids[index_last_to_win]) print("part2:", random_numbers[i], s, random_numbers[i] * s) return i += 1 if __name__ == "__main__": main()
34.218182
120
0.624867
292
1,882
3.815068
0.260274
0.075404
0.050269
0.037702
0.219031
0.125673
0.041293
0.041293
0
0
0
0.016552
0.229543
1,882
54
121
34.851852
0.751724
0.035069
0
0
0
0
0.016575
0
0
0
0
0
0
1
0.128205
false
0
0.025641
0.025641
0.25641
0.051282
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3aa16cb3dbe2b4a517472d48c9588e47d4f479
1,497
py
Python
altair/vegalite/v2/examples/us_population_pyramid_over_time.py
hugovk/altair
a3c9f06790f7a8c5c7e2c98278d0f69e4630b5be
[ "BSD-3-Clause" ]
1
2022-03-13T21:42:09.000Z
2022-03-13T21:42:09.000Z
altair/vegalite/v2/examples/us_population_pyramid_over_time.py
RoyMachineLearning/altair
74a765b373694776e63d224d99536975cc173810
[ "BSD-3-Clause" ]
null
null
null
altair/vegalite/v2/examples/us_population_pyramid_over_time.py
RoyMachineLearning/altair
74a765b373694776e63d224d99536975cc173810
[ "BSD-3-Clause" ]
null
null
null
''' US Population Pyramid Over Time =============================== A population pyramid shows the distribution of age groups within a population. It uses a slider widget that is bound to the year to visualize the age distribution over time. ''' # category: case studies import altair as alt from altair.expr import datum, if_ from vega_datasets import data pop = data.population.url slider = alt.binding_range(min=1850, max=2000, step=10) select_year = alt.selection_single(name='year', fields=['year'], bind=slider) base = alt.Chart(pop).add_selection( select_year ).transform_filter( select_year ).transform_calculate( gender=if_(datum.sex == 1, 'Male', 'Female') ) title = alt.Axis(title='population') color_scale = alt.Scale(domain=['Male', 'Female'], range=['#1f77b4', '#e377c2']) left = base.transform_filter( datum.gender == 'Female' ).encode( y=alt.X('age:O', axis=None), x=alt.X('sum(people):Q', axis=title, sort=alt.SortOrder('descending')), color=alt.Color('gender:N', scale=color_scale, legend=None) ).mark_bar().properties(title='Female') middle = base.encode( y=alt.X('age:O', axis=None), text=alt.Text('age:Q'), ).mark_text().properties(width=20) right = base.transform_filter( datum.gender == 'Male' ).encode( y=alt.X('age:O', axis=None), x=alt.X('sum(people):Q', axis=title), color=alt.Color('gender:N', scale=color_scale, legend=None) ).mark_bar().properties(title='Male') left | middle | right
29.352941
78
0.674683
217
1,497
4.56682
0.428571
0.020182
0.030272
0.0333
0.313824
0.25328
0.25328
0.25328
0.230071
0.230071
0
0.016393
0.144289
1,497
51
79
29.352941
0.757221
0.174349
0
0.257143
0
0
0.117168
0
0
0
0
0
0
1
0
false
0
0.085714
0
0.085714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3b3f3a85478c2b401e7083ce3f440c82013e30
987
py
Python
mtp_api/apps/core/migrations/0004_token.py
ministryofjustice/mtp-api
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
[ "MIT" ]
5
2016-01-05T12:21:35.000Z
2020-10-28T17:06:02.000Z
mtp_api/apps/core/migrations/0004_token.py
ministryofjustice/mtp-api
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
[ "MIT" ]
209
2015-06-12T09:39:41.000Z
2022-03-21T16:01:19.000Z
mtp_api/apps/core/migrations/0004_token.py
ministryofjustice/mtp-api
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
[ "MIT" ]
1
2021-04-11T06:19:23.000Z
2021-04-11T06:19:23.000Z
from django.db import migrations, models import django.utils.timezone import model_utils.fields class Migration(migrations.Migration): dependencies = [ ('core', '0003_auto_20180404_1515'), ] operations = [ migrations.CreateModel( name='Token', fields=[ ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('name', models.CharField(max_length=20, primary_key=True, serialize=False)), ('token', models.TextField()), ('expires', models.DateTimeField(blank=True, null=True)), ], options={ 'ordering': ('name',), 'permissions': (('view_token', 'Can view token'),), }, ), ]
37.961538
147
0.591692
90
987
6.366667
0.544444
0.057592
0.099476
0.09075
0.184991
0.184991
0.184991
0.184991
0.184991
0
0
0.024965
0.269504
987
25
148
39.48
0.769764
0
0
0
0
0
0.126646
0.023303
0
0
0
0
0
1
0
false
0
0.130435
0
0.26087
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3ca54d0e30bc25beb86e00254a401833904b9e
6,885
py
Python
network_checker/dhcp_checker/utils.py
Zipfer/fuel-web
c6c4032eb6e29474e2be0318349265bdb566454c
[ "Apache-2.0" ]
null
null
null
network_checker/dhcp_checker/utils.py
Zipfer/fuel-web
c6c4032eb6e29474e2be0318349265bdb566454c
[ "Apache-2.0" ]
null
null
null
network_checker/dhcp_checker/utils.py
Zipfer/fuel-web
c6c4032eb6e29474e2be0318349265bdb566454c
[ "Apache-2.0" ]
null
null
null
# Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import re import subprocess import sys from scapy import all as scapy DHCP_OFFER_COLUMNS = ('iface', 'mac', 'server_ip', 'server_id', 'gateway', 'dport', 'message', 'yiaddr') def command_util(*command): """object with stderr and stdout """ return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def _check_vconfig(): """Check vconfig installed or not """ return not command_util('which', 'vconfig').stderr.read() def _iface_state(iface): """For a given iface return it's state returns UP, DOWN, UNKNOWN """ state = command_util('ip', 'link', 'show', iface).stdout.read() search_result = re.search(r'.*<(?P<state>.*)>.*', state) if search_result: state_list = search_result.groupdict().get('state', []) if 'UP' in state_list: return 'UP' else: return 'DOWN' return 'UNKNOWN' def check_network_up(iface): return _iface_state(iface) == 'UP' def check_iface_exist(iface): """Check provided interface exists """ return not command_util("ip", "link", "show", iface).stderr.read() def filtered_ifaces(ifaces): for iface in ifaces: if not check_iface_exist(iface): sys.stderr.write('Iface {0} does not exist.'.format(iface)) else: if not check_network_up(iface): sys.stderr.write('Network for iface {0} is down.'.format( iface)) else: yield iface def pick_ip(range_start, range_end): """Given start_range, end_range generate list of ips >>> next(pick_ip('192.168.1.10','192.168.1.13')) '192.168.1.10' """ split_address = lambda ip_address: \ [int(item) for item in ip_address.split('.')] range_start = split_address(range_start) range_end = split_address(range_end) i = 0 # ipv4 subnet cant be longer that 4 items while i < 4: # 255 - end of subnet if not range_start[i] == range_end[i] and range_start[i] < 255: yield '.'.join([str(item) for item in range_start]) range_start[i] += 1 else: i += 1 def get_item_properties(item, columns): """Get specified in columns properties, with preserved order. Required for correct cli table generation :param item: dict :param columns: list with arbitrary keys """ properties = [] for key in columns: properties.append(item.get(key, '')) return properties def format_options(options): """Util for serializing dhcp options @options = [1,2,3] >>> format_options([1, 2, 3]) '\x01\x02\x03' """ return "".join((chr(item) for item in options)) def _dhcp_options(dhcp_options): """Dhcp options returned by scapy is not in usable format [('message-type', 2), ('server_id', '192.168.0.5'), ('name_server', '192.168.0.1', '192.168.0.2'), 'end'] """ for option in dhcp_options: if isinstance(option, (tuple, list)): header = option[0] if len(option[1:]) > 1: yield (header, option) else: yield (header, option[1]) def format_answer(ans, iface): dhcp_options = dict(_dhcp_options(ans[scapy.DHCP].options)) results = ( iface, ans[scapy.Ether].src, ans[scapy.IP].src, dhcp_options['server_id'], ans[scapy.BOOTP].giaddr, ans[scapy.UDP].sport, scapy.DHCPTypes[dhcp_options['message-type']], ans[scapy.BOOTP].yiaddr) return dict(zip(DHCP_OFFER_COLUMNS, results)) def single_format(func): """Manage format of dhcp response """ @functools.wraps(func) def formatter(*args, **kwargs): iface = args[0] ans = func(*args, **kwargs) #scapy stores all sequence of requests #so ans[0][1] would be response to first request return [format_answer(response[1], iface) for response in ans] return formatter def multiproc_map(func): # multiproc map could not work with format *args @functools.wraps(func) def workaround(*args, **kwargs): args = args[0] if isinstance(args[0], (tuple, list)) else args return func(*args, **kwargs) return workaround def filter_duplicated_results(func): # due to network infra on broadcast multiple duplicated results # returned. This helper filter them out @functools.wraps(func) def wrapper(*args, **kwargs): resp = func(*args, **kwargs) return (dict(t) for t in set([tuple(d.items()) for d in resp])) return wrapper class VlansContext(object): """Contains all logic to manage vlans """ def __init__(self, config): """Initialize VlansContext @config - list or tuple of (iface, vlan) pairs """ self.config = config def __enter__(self): for iface, vlans in self.config.iteritems(): vifaces = [] for vlan in vlans: if vlan > 0: vifaces.append('{0}.{1}'.format(iface, vlan)) yield str(iface), vifaces def __exit__(self, type, value, trace): pass class IfaceState(object): """Context manager to control state of iface when dhcp checker is running """ def __init__(self, iface, rollback=True, retry=3): self.rollback = rollback self.retry = retry self.iface = iface self.pre_iface_state = _iface_state(iface) self.iface_state = self.pre_iface_state self.post_iface_state = '' def iface_up(self): while self.retry and self.iface_state != 'UP': command_util('ifconfig', self.iface, 'up') self.iface_state = _iface_state(self.iface) self.retry -= 1 if self.iface_state != 'UP': raise EnvironmentError( 'Tried my best to ifup iface {0}.'.format(self.iface)) def __enter__(self): self.iface_up() return self.iface def __exit__(self, exc_type, exc_val, exc_tb): if self.pre_iface_state != 'UP' and self.rollback: command_util('ifconfig', self.iface, 'down') self.post_iface_state = _iface_state(self.iface)
30.464602
78
0.615832
899
6,885
4.579533
0.303671
0.034005
0.021861
0.009473
0.040321
0.026718
0
0
0
0
0
0.020154
0.264924
6,885
225
79
30.6
0.793321
0.260857
0
0.079365
0
0
0.054742
0
0
0
0
0
0
1
0.190476
false
0.007937
0.039683
0.007937
0.380952
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3e36ced124cb7dabea478a1f4b328edd22757f
1,344
py
Python
news_access.py
HydeJackal/TwitterWeeklyNewsBot
64fc6b9e7d74bafd26f4dcdfe28e835ece1cee9b
[ "MIT" ]
null
null
null
news_access.py
HydeJackal/TwitterWeeklyNewsBot
64fc6b9e7d74bafd26f4dcdfe28e835ece1cee9b
[ "MIT" ]
null
null
null
news_access.py
HydeJackal/TwitterWeeklyNewsBot
64fc6b9e7d74bafd26f4dcdfe28e835ece1cee9b
[ "MIT" ]
null
null
null
import json import urllib.request import credentials from datetime import datetime, timedelta class NewsAPI: def __init__(self, nyt_api): self.nyt_access = nyt_api def get_nyt_last_week_articles(self, topic, today): delta = timedelta(weeks = 1) last_week = today - delta begin_date = last_week.strftime('%Y%m%d') url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + topic + '&begin_date=' + begin_date + '&sort=best&type_of_material=Article&api-key=' + self.nyt_access try: json_url = urllib.request.urlopen(url) articles = json.loads(json_url.read()) except: raise RuntimeError('Failed to retrive New York Times data.') if articles['status'] != 'OK': num_of_articles = articles['response']['docs'].length() if num_of_articles > 5: return articles['response']['docs'][0:4], articles['response']['meta']['hits'] else: return articles['response']['docs'][0:num_of_articles - 1], articles['response']['meta']['hits'] else: raise RuntimeError('Failed to find any New York Times articles with query.') api = NewsAPI(credentials.NYT_API) date_time_obj = datetime.now() api.get_nyt_last_week_articles('election', date_time_obj)
42
180
0.638393
172
1,344
4.77907
0.482558
0.097324
0.047445
0.034063
0.187348
0
0
0
0
0
0
0.006776
0.231399
1,344
32
181
42
0.788964
0
0
0.071429
0
0
0.220818
0.032714
0
0
0
0
0
1
0.071429
false
0
0.142857
0
0.321429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a3e37aebc3bcbec9a89c90db3f81c339c737670
8,847
py
Python
tests/unit/test_trial_component.py
owen-t/sagemaker-experiments-1
ef2af4009c3a5c6a63db5cec6b9de6c614dfdd66
[ "Apache-2.0" ]
null
null
null
tests/unit/test_trial_component.py
owen-t/sagemaker-experiments-1
ef2af4009c3a5c6a63db5cec6b9de6c614dfdd66
[ "Apache-2.0" ]
null
null
null
tests/unit/test_trial_component.py
owen-t/sagemaker-experiments-1
ef2af4009c3a5c6a63db5cec6b9de6c614dfdd66
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from smexperiments import trial_component, api_types import datetime import pytest import unittest.mock @pytest.fixture def sagemaker_boto_client(): return unittest.mock.Mock() def test_create(sagemaker_boto_client): sagemaker_boto_client.create_trial_component.return_value = { "TrialComponentArn": "bazz", } obj = trial_component.TrialComponent.create( trial_component_name="foo", display_name="bar", sagemaker_boto_client=sagemaker_boto_client ) sagemaker_boto_client.create_trial_component.assert_called_with(TrialComponentName="foo", DisplayName="bar") assert "foo" == obj.trial_component_name assert "bar" == obj.display_name assert "bazz" == obj.trial_component_arn def test_load(sagemaker_boto_client): now = datetime.datetime.now(datetime.timezone.utc) sagemaker_boto_client.describe_trial_component.return_value = { "TrialComponentArn": "A", "TrialComponentName": "B", "DisplayName": "C", "Status": {"PrimaryStatus": "InProgress", "Message": "D"}, "Parameters": {"E": {"NumberValue": 1.0}, "F": {"StringValue": "G"}}, "InputArtifacts": {"H": {"Value": "s3://foo/bar", "MediaType": "text/plain"}}, "OutputArtifacts": {"I": {"Value": "s3://whizz/bang", "MediaType": "text/plain"}}, "Metrics": [ { "MetricName": "J", "Count": 1, "Min": 1.0, "Max": 2.0, "Avg": 3.0, "StdDev": 4.0, "SourceArn": "K", "Timestamp": now, } ], } obj = trial_component.TrialComponent.load(trial_component_name="foo", sagemaker_boto_client=sagemaker_boto_client) sagemaker_boto_client.describe_trial_component.assert_called_with(TrialComponentName="foo") assert "A" == obj.trial_component_arn assert "B" == obj.trial_component_name assert "C" == obj.display_name assert api_types.TrialComponentStatus(primary_status="InProgress", message="D") == obj.status assert {"E": 1.0, "F": "G"} == obj.parameters assert {"H": api_types.TrialComponentArtifact(value="s3://foo/bar", media_type="text/plain")} assert {"I": api_types.TrialComponentArtifact(value="s3://whizz/bang", media_type="text/plain")} assert [ api_types.TrialComponentMetricSummary( metric_name="J", count=1, min=1.0, max=2.0, avg=3.0, std_dev=4.0, source_arn="K", timestamp=now ) ] def test_list(sagemaker_boto_client): start_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=1) end_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=2) creation_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=3) last_modified_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=4) sagemaker_boto_client.list_trial_components.side_effect = [ { "TrialComponentSummaries": [ { "TrialComponentName": "A" + str(i), "TrialComponentArn": "B" + str(i), "DisplayName": "C" + str(i), "SourceArn": "D" + str(i), "Status": {"PrimaryStatus": "InProgress", "Message": "E" + str(i)}, "StartTime": start_time + datetime.timedelta(hours=i), "EndTime": end_time + datetime.timedelta(hours=i), "CreationTime": creation_time + datetime.timedelta(hours=i), "LastModifiedTime": last_modified_time + datetime.timedelta(hours=i), "LastModifiedBy": {}, } for i in range(10) ], "NextToken": "100", }, { "TrialComponentSummaries": [ { "TrialComponentName": "A" + str(i), "TrialComponentArn": "B" + str(i), "DisplayName": "C" + str(i), "SourceArn": "D" + str(i), "Status": {"PrimaryStatus": "InProgress", "Message": "E" + str(i)}, "StartTime": start_time + datetime.timedelta(hours=i), "EndTime": end_time + datetime.timedelta(hours=i), "CreationTime": creation_time + datetime.timedelta(hours=i), "LastModifiedTime": last_modified_time + datetime.timedelta(hours=i), "LastModifiedBy": {}, } for i in range(10, 20) ] }, ] expected = [ api_types.TrialComponentSummary( trial_component_name="A" + str(i), trial_component_arn="B" + str(i), display_name="C" + str(i), source_arn="D" + str(i), status=api_types.TrialComponentStatus(primary_status="InProgress", message="E" + str(i)), start_time=start_time + datetime.timedelta(hours=i), end_time=end_time + datetime.timedelta(hours=i), creation_time=creation_time + datetime.timedelta(hours=i), last_modified_time=last_modified_time + datetime.timedelta(hours=i), last_modified_by={}, ) for i in range(20) ] result = list( trial_component.TrialComponent.list( sagemaker_boto_client=sagemaker_boto_client, source_arn="foo", sort_by="CreationTime", sort_order="Ascending", ) ) assert expected == result expected_calls = [ unittest.mock.call(SortBy="CreationTime", SortOrder="Ascending", SourceArn="foo"), unittest.mock.call(NextToken="100", SortBy="CreationTime", SortOrder="Ascending", SourceArn="foo"), ] assert expected_calls == sagemaker_boto_client.list_trial_components.mock_calls def test_list_empty(sagemaker_boto_client): sagemaker_boto_client.list_trial_components.return_value = {"TrialComponentSummaries": []} assert [] == list(trial_component.TrialComponent.list(sagemaker_boto_client=sagemaker_boto_client)) def test_list_trial_components_call_args(sagemaker_boto_client): created_before = datetime.datetime(1999, 10, 12, 0, 0, 0) created_after = datetime.datetime(1990, 10, 12, 0, 0, 0) trial_name = "foo-trial" experiment_name = "foo-experiment" next_token = "thetoken" max_results = 99 sagemaker_boto_client.list_trial_components.return_value = {} assert [] == list( trial_component.TrialComponent.list( sagemaker_boto_client=sagemaker_boto_client, trial_name=trial_name, experiment_name=experiment_name, created_before=created_before, created_after=created_after, next_token=next_token, max_results=max_results, sort_by="CreationTime", sort_order="Ascending", ) ) expected_calls = [ unittest.mock.call( TrialName="foo-trial", ExperimentName="foo-experiment", CreatedBefore=created_before, CreatedAfter=created_after, SortBy="CreationTime", SortOrder="Ascending", NextToken="thetoken", MaxResults=99, ) ] assert expected_calls == sagemaker_boto_client.list_trial_components.mock_calls def test_save(sagemaker_boto_client): obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar") sagemaker_boto_client.update_trial_component.return_value = {} obj.save() sagemaker_boto_client.update_trial_component.assert_called_with(TrialComponentName="foo", DisplayName="bar") def test_delete(sagemaker_boto_client): obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar") sagemaker_boto_client.delete_trial_component.return_value = {} obj.delete() sagemaker_boto_client.delete_trial_component.assert_called_with(TrialComponentName="foo") def test_boto_ignore(): obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar") assert obj._boto_ignore() == ["ResponseMetadata", "CreatedBy"]
41.341121
118
0.635696
957
8,847
5.630094
0.214211
0.082034
0.119896
0.057906
0.605234
0.542687
0.455085
0.397921
0.379733
0.326095
0
0.011652
0.243359
8,847
213
119
41.535211
0.793248
0.061038
0
0.221591
0
0
0.12491
0.008319
0
0
0
0
0.119318
1
0.051136
false
0
0.022727
0.005682
0.079545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a40f2fa1c9f612802f1a429d750b73c73bf44c3
67,147
py
Python
src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_validators.py
AndrewLane/azure-cli
524491c580fc3c133f2d9859cef1c8251f4192e4
[ "MIT" ]
null
null
null
src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_validators.py
AndrewLane/azure-cli
524491c580fc3c133f2d9859cef1c8251f4192e4
[ "MIT" ]
3
2021-03-26T00:25:36.000Z
2022-03-29T22:03:55.000Z
src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_validators.py
david-driscoll/azure-cli
0dbf5e4ac2f35057bc9b8234b0a59612593552c5
[ "MIT" ]
null
null
null
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint:disable=too-many-lines import os try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse # pylint: disable=import-error from knack.log import get_logger from knack.util import CLIError from azure.cli.core.commands.validators import ( get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags) from azure.cli.core.util import hash_string from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri from azure.cli.command_modules.vm._template_builder import StorageProfile import azure.cli.core.keys as keys from ._client_factory import _compute_client_factory from ._actions import _get_latest_image_version logger = get_logger(__name__) def validate_asg_names_or_ids(cmd, namespace): from msrestazure.tools import resource_id, is_valid_resource_id from azure.cli.core.profiles import ResourceType from azure.cli.core.commands.client_factory import get_subscription_id ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup', resource_type=ResourceType.MGMT_NETWORK) resource_group = namespace.resource_group_name subscription_id = get_subscription_id(cmd.cli_ctx) names_or_ids = getattr(namespace, 'application_security_groups') ids = [] if names_or_ids == [""] or not names_or_ids: return for val in names_or_ids: if not is_valid_resource_id(val): val = resource_id( subscription=subscription_id, resource_group=resource_group, namespace='Microsoft.Network', type='applicationSecurityGroups', name=val ) ids.append(ApplicationSecurityGroup(id=val)) setattr(namespace, 'application_security_groups', ids) def validate_nsg_name(cmd, namespace): from msrestazure.tools import resource_id from azure.cli.core.commands.client_factory import get_subscription_id vm_id = resource_id(name=namespace.vm_name, resource_group=namespace.resource_group_name, namespace='Microsoft.Compute', type='virtualMachines', subscription=get_subscription_id(cmd.cli_ctx)) namespace.network_security_group_name = namespace.network_security_group_name \ or '{}_NSG_{}'.format(namespace.vm_name, hash_string(vm_id, length=8)) def validate_keyvault(cmd, namespace): namespace.keyvault = _get_resource_id(cmd.cli_ctx, namespace.keyvault, namespace.resource_group_name, 'vaults', 'Microsoft.KeyVault') def process_vm_secret_format(cmd, namespace): from msrestazure.tools import is_valid_resource_id keyvault_usage = CLIError('usage error: [--keyvault NAME --resource-group NAME | --keyvault ID]') kv = namespace.keyvault rg = namespace.resource_group_name if rg: if not kv or is_valid_resource_id(kv): raise keyvault_usage validate_keyvault(cmd, namespace) else: if kv and not is_valid_resource_id(kv): raise keyvault_usage def _get_resource_group_from_vault_name(cli_ctx, vault_name): """ Fetch resource group from vault name :param str vault_name: name of the key vault :return: resource group name or None :rtype: str """ from azure.cli.core.profiles import ResourceType from azure.cli.core.commands.client_factory import get_mgmt_service_client from msrestazure.tools import parse_resource_id client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults for vault in client.list(): id_comps = parse_resource_id(vault.id) if id_comps['name'] == vault_name: return id_comps['resource_group'] return None def _get_resource_id(cli_ctx, val, resource_group, resource_type, resource_namespace): from msrestazure.tools import resource_id, is_valid_resource_id from azure.cli.core.commands.client_factory import get_subscription_id if is_valid_resource_id(val): return val kwargs = { 'name': val, 'resource_group': resource_group, 'namespace': resource_namespace, 'type': resource_type, 'subscription': get_subscription_id(cli_ctx) } missing_kwargs = {k: v for k, v in kwargs.items() if not v} return resource_id(**kwargs) if not missing_kwargs else None def _get_nic_id(cli_ctx, val, resource_group): return _get_resource_id(cli_ctx, val, resource_group, 'networkInterfaces', 'Microsoft.Network') def validate_vm_nic(cmd, namespace): namespace.nic = _get_nic_id(cmd.cli_ctx, namespace.nic, namespace.resource_group_name) def validate_vm_nics(cmd, namespace): rg = namespace.resource_group_name nic_ids = [] for n in namespace.nics: nic_ids.append(_get_nic_id(cmd.cli_ctx, n, rg)) namespace.nics = nic_ids if hasattr(namespace, 'primary_nic') and namespace.primary_nic: namespace.primary_nic = _get_nic_id(cmd.cli_ctx, namespace.primary_nic, rg) def _validate_secrets(secrets, os_type): """ Validates a parsed JSON array containing secrets for use in VM Creation Secrets JSON structure [{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)" }] }] :param dict secrets: Dict fitting the JSON description above :param string os_type: the type of OS (linux or windows) :return: errors if any were found :rtype: list """ is_windows = os_type == 'windows' errors = [] try: loaded_secret = [validate_file_or_dict(secret) for secret in secrets] except Exception as err: raise CLIError('Error decoding secrets: {0}'.format(err)) for idx_arg, narg_secret in enumerate(loaded_secret): for idx, secret in enumerate(narg_secret): if 'sourceVault' not in secret: errors.append( 'Secret is missing sourceVault key at index {0} in arg {1}'.format( idx, idx_arg)) if 'sourceVault' in secret and 'id' not in secret['sourceVault']: errors.append( 'Secret is missing sourceVault.id key at index {0} in arg {1}'.format( idx, idx_arg)) if 'vaultCertificates' not in secret or not secret['vaultCertificates']: err = 'Secret is missing vaultCertificates array or it is empty at index {0} in ' \ 'arg {1} ' errors.append(err.format(idx, idx_arg)) else: for jdx, cert in enumerate(secret['vaultCertificates']): message = 'Secret is missing {0} within vaultCertificates array at secret ' \ 'index {1} and vaultCertificate index {2} in arg {3}' if 'certificateUrl' not in cert: errors.append(message.format('certificateUrl', idx, jdx, idx_arg)) if is_windows and 'certificateStore' not in cert: errors.append(message.format('certificateStore', idx, jdx, idx_arg)) if errors: raise CLIError('\n'.join(errors)) # region VM Create Validators def _parse_image_argument(cmd, namespace): """ Systematically determines what type is supplied for the --image parameter. Updates the namespace and returns the type for subsequent processing. """ from msrestazure.tools import is_valid_resource_id from msrestazure.azure_exceptions import CloudError import re # 1 - check if a fully-qualified ID (assumes it is an image ID) if is_valid_resource_id(namespace.image): return 'image_id' # 2 - attempt to match an URN pattern urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.image) if urn_match: namespace.os_publisher = urn_match.group(1) namespace.os_offer = urn_match.group(2) namespace.os_sku = urn_match.group(3) namespace.os_version = urn_match.group(4) if not any([namespace.plan_name, namespace.plan_product, namespace.plan_publisher]): image_plan = _get_image_plan_info_if_exists(cmd, namespace) if image_plan: namespace.plan_name = image_plan.name namespace.plan_product = image_plan.product namespace.plan_publisher = image_plan.publisher return 'urn' # 3 - unmanaged vhd based images? if urlparse(namespace.image).scheme: return 'uri' # 4 - attempt to match an URN alias (most likely) from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc images = load_images_from_aliases_doc(cmd.cli_ctx) matched = next((x for x in images if x['urnAlias'].lower() == namespace.image.lower()), None) if matched: namespace.os_publisher = matched['publisher'] namespace.os_offer = matched['offer'] namespace.os_sku = matched['sku'] namespace.os_version = matched['version'] return 'urn' # 5 - check if an existing managed disk image resource compute_client = _compute_client_factory(cmd.cli_ctx) try: compute_client.images.get(namespace.resource_group_name, namespace.image) namespace.image = _get_resource_id(cmd.cli_ctx, namespace.image, namespace.resource_group_name, 'images', 'Microsoft.Compute') return 'image_id' except CloudError: err = 'Invalid image "{}". Use a custom image name, id, or pick one from {}' raise CLIError(err.format(namespace.image, [x['urnAlias'] for x in images])) def _get_image_plan_info_if_exists(cmd, namespace): from msrestazure.azure_exceptions import CloudError try: compute_client = _compute_client_factory(cmd.cli_ctx) if namespace.os_version.lower() == 'latest': image_version = _get_latest_image_version(cmd.cli_ctx, namespace.location, namespace.os_publisher, namespace.os_offer, namespace.os_sku) else: image_version = namespace.os_version image = compute_client.virtual_machine_images.get(namespace.location, namespace.os_publisher, namespace.os_offer, namespace.os_sku, image_version) # pylint: disable=no-member return image.plan except CloudError as ex: logger.warning("Querying the image of '%s' failed for an error '%s'. Configuring plan settings " "will be skipped", namespace.image, ex.message) # pylint: disable=inconsistent-return-statements def _get_storage_profile_description(profile): if profile == StorageProfile.SACustomImage: return 'create unmanaged OS disk created from generalized VHD' elif profile == StorageProfile.SAPirImage: return 'create unmanaged OS disk from Azure Marketplace image' elif profile == StorageProfile.SASpecializedOSDisk: return 'attach to existing unmanaged OS disk' elif profile == StorageProfile.ManagedCustomImage: return 'create managed OS disk from custom image' elif profile == StorageProfile.ManagedPirImage: return 'create managed OS disk from Azure Marketplace image' elif profile == StorageProfile.ManagedSpecializedOSDisk: return 'attach existing managed OS disk' def _validate_managed_disk_sku(sku): allowed_skus = ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS'] if sku and sku.lower() not in [x.lower() for x in allowed_skus]: raise CLIError("invalid storage SKU '{}': allowed values: '{}'".format(sku, allowed_skus)) def _validate_location(cmd, namespace, zone_info, size_info): from ._vm_utils import list_sku_info if not namespace.location: get_default_location_from_resource_group(cmd, namespace) if zone_info: sku_infos = list_sku_info(cmd.cli_ctx, namespace.location) temp = next((x for x in sku_infos if x.name.lower() == size_info.lower()), None) # For Stack (compute - 2017-03-30), Resource_sku doesn't implement location_info property if not hasattr(temp, 'location_info'): return if not temp or not [x for x in (temp.location_info or []) if x.zones]: raise CLIError("{}'s location can't be used to create the VM/VMSS because availablity zone is not yet " "supported. Please use '--location' to specify a capable one. 'az vm list-skus' can be " "used to find such locations".format(namespace.resource_group_name)) # pylint: disable=too-many-branches, too-many-statements def _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=False): from msrestazure.tools import parse_resource_id # use minimal parameters to resolve the expected storage profile if getattr(namespace, 'attach_os_disk', None) and not namespace.image: if namespace.use_unmanaged_disk: # STORAGE PROFILE #3 namespace.storage_profile = StorageProfile.SASpecializedOSDisk else: # STORAGE PROFILE #6 namespace.storage_profile = StorageProfile.ManagedSpecializedOSDisk elif namespace.image and not getattr(namespace, 'attach_os_disk', None): image_type = _parse_image_argument(cmd, namespace) if image_type == 'uri': # STORAGE PROFILE #2 namespace.storage_profile = StorageProfile.SACustomImage elif image_type == 'image_id': # STORAGE PROFILE #5 namespace.storage_profile = StorageProfile.ManagedCustomImage elif image_type == 'urn': if namespace.use_unmanaged_disk: # STORAGE PROFILE #1 namespace.storage_profile = StorageProfile.SAPirImage else: # STORAGE PROFILE #4 namespace.storage_profile = StorageProfile.ManagedPirImage else: raise CLIError('Unrecognized image type: {}'.format(image_type)) else: # did not specify image XOR attach-os-disk raise CLIError('incorrect usage: --image IMAGE | --attach-os-disk DISK') auth_params = ['admin_password', 'admin_username', 'authentication_type', 'generate_ssh_keys', 'ssh_dest_key_path', 'ssh_key_value'] # perform parameter validation for the specific storage profile # start with the required/forbidden parameters for VM if namespace.storage_profile == StorageProfile.ManagedPirImage: required = ['image'] forbidden = ['os_type', 'attach_os_disk', 'storage_account', 'storage_container_name', 'use_unmanaged_disk'] if for_scale_set: forbidden.append('os_disk_name') _validate_managed_disk_sku(namespace.storage_sku) elif namespace.storage_profile == StorageProfile.ManagedCustomImage: required = ['image'] forbidden = ['os_type', 'attach_os_disk', 'storage_account', 'storage_container_name', 'use_unmanaged_disk'] if for_scale_set: forbidden.append('os_disk_name') _validate_managed_disk_sku(namespace.storage_sku) elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk: required = ['os_type', 'attach_os_disk'] forbidden = ['os_disk_name', 'os_caching', 'storage_account', 'storage_container_name', 'use_unmanaged_disk', 'storage_sku'] + auth_params _validate_managed_disk_sku(namespace.storage_sku) elif namespace.storage_profile == StorageProfile.SAPirImage: required = ['image', 'use_unmanaged_disk'] forbidden = ['os_type', 'attach_os_disk', 'data_disk_sizes_gb'] elif namespace.storage_profile == StorageProfile.SACustomImage: required = ['image', 'os_type', 'use_unmanaged_disk'] forbidden = ['attach_os_disk', 'data_disk_sizes_gb'] elif namespace.storage_profile == StorageProfile.SASpecializedOSDisk: required = ['os_type', 'attach_os_disk', 'use_unmanaged_disk'] forbidden = ['os_disk_name', 'os_caching', 'image', 'storage_account', 'storage_container_name', 'data_disk_sizes_gb', 'storage_sku'] + auth_params else: raise CLIError('Unrecognized storage profile: {}'.format(namespace.storage_profile)) logger.debug("storage profile '%s'", namespace.storage_profile) if for_scale_set: # VMSS lacks some parameters, so scrub these out props_to_remove = ['attach_os_disk', 'storage_account'] for prop in props_to_remove: if prop in required: required.remove(prop) if prop in forbidden: forbidden.remove(prop) # set default storage SKU if not provided and using an image based OS if not namespace.storage_sku and namespace.storage_profile in [StorageProfile.SAPirImage, StorageProfile.SACustomImage]: # pylint: disable=line-too-long namespace.storage_sku = 'Standard_LRS' if for_scale_set else 'Premium_LRS' if namespace.storage_sku == 'UltraSSD_LRS' and namespace.ultra_ssd_enabled is None: namespace.ultra_ssd_enabled = True # Now verify that the status of required and forbidden parameters validate_parameter_set( namespace, required, forbidden, description='storage profile: {}:'.format(_get_storage_profile_description(namespace.storage_profile))) image_data_disks_num = 0 if namespace.storage_profile == StorageProfile.ManagedCustomImage: # extract additional information from a managed custom image res = parse_resource_id(namespace.image) compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription']) if res['type'].lower() == 'images': image_info = compute_client.images.get(res['resource_group'], res['name']) namespace.os_type = image_info.storage_profile.os_disk.os_type.value image_data_disks_num = len(image_info.storage_profile.data_disks or []) elif res['type'].lower() == 'galleries': image_info = compute_client.gallery_images.get(resource_group_name=res['resource_group'], gallery_name=res['name'], gallery_image_name=res['child_name_1']) namespace.os_type = image_info.os_type.value gallery_image_version = res.get('child_name_2', '') if gallery_image_version.lower() in ['latest', '']: image_version_infos = compute_client.gallery_image_versions.list_by_gallery_image( resource_group_name=res['resource_group'], gallery_name=res['name'], gallery_image_name=res['child_name_1']) image_version_infos = [x for x in image_version_infos if not x.publishing_profile.exclude_from_latest] if not image_version_infos: raise CLIError('There is no latest image version exists for "{}"'.format(namespace.image)) image_version_info = sorted(image_version_infos, key=lambda x: x.publishing_profile.published_date)[-1] else: image_version_info = compute_client.gallery_image_versions.get( resource_group_name=res['resource_group'], gallery_name=res['name'], gallery_image_name=res['child_name_1'], gallery_image_version_name=res['child_name_2']) image_data_disks_num = len(image_version_info.storage_profile.data_disk_images or []) else: raise CLIError('usage error: unrecognized image informations "{}"'.format(namespace.image)) # pylint: disable=no-member elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk: # accept disk name or ID namespace.attach_os_disk = _get_resource_id( cmd.cli_ctx, namespace.attach_os_disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute') if getattr(namespace, 'attach_data_disks', None): if not namespace.use_unmanaged_disk: namespace.attach_data_disks = [_get_resource_id(cmd.cli_ctx, d, namespace.resource_group_name, 'disks', 'Microsoft.Compute') for d in namespace.attach_data_disks] if not namespace.os_type: namespace.os_type = 'windows' if 'windows' in namespace.os_offer.lower() else 'linux' from ._vm_utils import normalize_disk_info # attach_data_disks are not exposed yet for VMSS, so use 'getattr' to avoid crash namespace.disk_info = normalize_disk_info(image_data_disks_num=image_data_disks_num, data_disk_sizes_gb=namespace.data_disk_sizes_gb, attach_data_disks=getattr(namespace, 'attach_data_disks', []), storage_sku=namespace.storage_sku, os_disk_caching=namespace.os_caching, data_disk_cachings=namespace.data_caching) def _validate_vm_create_storage_account(cmd, namespace): from msrestazure.tools import parse_resource_id if namespace.storage_account: storage_id = parse_resource_id(namespace.storage_account) rg = storage_id.get('resource_group', namespace.resource_group_name) if check_existence(cmd.cli_ctx, storage_id['name'], rg, 'Microsoft.Storage', 'storageAccounts'): # 1 - existing storage account specified namespace.storage_account_type = 'existing' logger.debug("using specified existing storage account '%s'", storage_id['name']) else: # 2 - params for new storage account specified namespace.storage_account_type = 'new' logger.debug("specified storage account '%s' not found and will be created", storage_id['name']) else: from azure.cli.core.profiles import ResourceType from azure.cli.core.commands.client_factory import get_mgmt_service_client storage_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE).storage_accounts # find storage account in target resource group that matches the VM's location sku_tier = 'Premium' if 'Premium' in namespace.storage_sku else 'Standard' account = next( (a for a in storage_client.list_by_resource_group(namespace.resource_group_name) if a.sku.tier.value == sku_tier and a.location == namespace.location), None) if account: # 3 - nothing specified - find viable storage account in target resource group namespace.storage_account = account.name namespace.storage_account_type = 'existing' logger.debug("suitable existing storage account '%s' will be used", account.name) else: # 4 - nothing specified - create a new storage account namespace.storage_account_type = 'new' logger.debug('no suitable storage account found. One will be created.') def _validate_vm_create_availability_set(cmd, namespace): from msrestazure.tools import parse_resource_id, resource_id from azure.cli.core.commands.client_factory import get_subscription_id if namespace.availability_set: as_id = parse_resource_id(namespace.availability_set) name = as_id['name'] rg = as_id.get('resource_group', namespace.resource_group_name) if not check_existence(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'availabilitySets'): raise CLIError("Availability set '{}' does not exist.".format(name)) namespace.availability_set = resource_id( subscription=get_subscription_id(cmd.cli_ctx), resource_group=rg, namespace='Microsoft.Compute', type='availabilitySets', name=name) logger.debug("adding to specified availability set '%s'", namespace.availability_set) def _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=False): from msrestazure.tools import is_valid_resource_id vnet = namespace.vnet_name subnet = namespace.subnet rg = namespace.resource_group_name location = namespace.location nics = getattr(namespace, 'nics', None) if not vnet and not subnet and not nics: logger.debug('no subnet specified. Attempting to find an existing Vnet and subnet...') # if nothing specified, try to find an existing vnet and subnet in the target resource group client = get_network_client(cmd.cli_ctx).virtual_networks # find VNET in target resource group that matches the VM's location with a matching subnet for vnet_match in (v for v in client.list(rg) if v.location == location and v.subnets): # 1 - find a suitable existing vnet/subnet result = None if not for_scale_set: result = next((s for s in vnet_match.subnets if s.name.lower() != 'gatewaysubnet'), None) else: def _check_subnet(s): if s.name.lower() == 'gatewaysubnet': return False subnet_mask = s.address_prefix.split('/')[-1] return _subnet_capacity_check(subnet_mask, namespace.instance_count, not namespace.disable_overprovision) result = next((s for s in vnet_match.subnets if _check_subnet(s)), None) if not result: continue namespace.subnet = result.name namespace.vnet_name = vnet_match.name namespace.vnet_type = 'existing' logger.debug("existing vnet '%s' and subnet '%s' found", namespace.vnet_name, namespace.subnet) return if subnet: subnet_is_id = is_valid_resource_id(subnet) if (subnet_is_id and vnet) or (not subnet_is_id and not vnet): raise CLIError("incorrect '--subnet' usage: --subnet SUBNET_ID | " "--subnet SUBNET_NAME --vnet-name VNET_NAME") subnet_exists = \ check_existence(cmd.cli_ctx, subnet, rg, 'Microsoft.Network', 'subnets', vnet, 'virtualNetworks') if subnet_is_id and not subnet_exists: raise CLIError("Subnet '{}' does not exist.".format(subnet)) elif subnet_exists: # 2 - user specified existing vnet/subnet namespace.vnet_type = 'existing' logger.debug("using specified vnet '%s' and subnet '%s'", namespace.vnet_name, namespace.subnet) return # 3 - create a new vnet/subnet namespace.vnet_type = 'new' logger.debug('no suitable subnet found. One will be created.') def _subnet_capacity_check(subnet_mask, vmss_instance_count, over_provision): mask = int(subnet_mask) # '2' are the reserved broadcasting addresses # '*1.5' so we have enough leeway for over-provision factor = 1.5 if over_provision else 1 return ((1 << (32 - mask)) - 2) > int(vmss_instance_count * factor) def _validate_vm_vmss_accelerated_networking(cli_ctx, namespace): if namespace.accelerated_networking is None: size = getattr(namespace, 'size', None) or getattr(namespace, 'vm_sku', None) size = size.lower() # to refresh the list, run 'az vm create --accelerated-networking --size Standard_DS1_v2' and # get it from the error aval_sizes = ['Standard_D3_v2', 'Standard_D12_v2', 'Standard_D3_v2_Promo', 'Standard_D12_v2_Promo', 'Standard_DS3_v2', 'Standard_DS12_v2', 'Standard_DS13-4_v2', 'Standard_DS14-4_v2', 'Standard_DS3_v2_Promo', 'Standard_DS12_v2_Promo', 'Standard_DS13-4_v2_Promo', 'Standard_DS14-4_v2_Promo', 'Standard_F4', 'Standard_F4s', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_D32-8s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D3_v2_ABC', 'Standard_D12_v2_ABC', 'Standard_F4_ABC', 'Standard_F8s_v2', 'Standard_D4_v2', 'Standard_D13_v2', 'Standard_D4_v2_Promo', 'Standard_D13_v2_Promo', 'Standard_DS4_v2', 'Standard_DS13_v2', 'Standard_DS14-8_v2', 'Standard_DS4_v2_Promo', 'Standard_DS13_v2_Promo', 'Standard_DS14-8_v2_Promo', 'Standard_F8', 'Standard_F8s', 'Standard_M64-16ms', 'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D32-16s_v3', 'Standard_D64-16s_v3', 'Standard_E16_v3', 'Standard_E16s_v3', 'Standard_E32-16s_v3', 'Standard_D4_v2_ABC', 'Standard_D13_v2_ABC', 'Standard_F8_ABC', 'Standard_F16s_v2', 'Standard_D5_v2', 'Standard_D14_v2', 'Standard_D5_v2_Promo', 'Standard_D14_v2_Promo', 'Standard_DS5_v2', 'Standard_DS14_v2', 'Standard_DS5_v2_Promo', 'Standard_DS14_v2_Promo', 'Standard_F16', 'Standard_F16s', 'Standard_M64-32ms', 'Standard_M128-32ms', 'Standard_D32_v3', 'Standard_D32s_v3', 'Standard_D64-32s_v3', 'Standard_E32_v3', 'Standard_E32s_v3', 'Standard_E32-8s_v3', 'Standard_E32-16_v3', 'Standard_D5_v2_ABC', 'Standard_D14_v2_ABC', 'Standard_F16_ABC', 'Standard_F32s_v2', 'Standard_D15_v2', 'Standard_D15_v2_Promo', 'Standard_D15_v2_Nested', 'Standard_DS15_v2', 'Standard_DS15_v2_Promo', 'Standard_DS15_v2_Nested', 'Standard_D40_v3', 'Standard_D40s_v3', 'Standard_D15_v2_ABC', 'Standard_M64ms', 'Standard_M64s', 'Standard_M128-64ms', 'Standard_D64_v3', 'Standard_D64s_v3', 'Standard_E64_v3', 'Standard_E64s_v3', 'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_F64s_v2', 'Standard_F72s_v2', 'Standard_M128s', 'Standard_M128ms', 'Standard_L8s_v2', 'Standard_L16s_v2', 'Standard_L32s_v2', 'Standard_L64s_v2', 'Standard_L96s_v2', 'SQLGL', 'SQLGLCore', 'Standard_D4_v3', 'Standard_D4s_v3', 'Standard_D2_v2', 'Standard_DS2_v2', 'Standard_E4_v3', 'Standard_E4s_v3', 'Standard_F2', 'Standard_F2s', 'Standard_F4s_v2', 'Standard_D11_v2', 'Standard_DS11_v2', 'AZAP_Performance_ComputeV17C'] aval_sizes = [x.lower() for x in aval_sizes] if size not in aval_sizes: return new_4core_sizes = ['Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D3_v2_ABC', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo', 'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D12_v2_ABC', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo', 'Standard_F8s_v2', 'Standard_F4', 'Standard_F4_ABC', 'Standard_F4s', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D8_v3', 'Standard_D8s_v3'] new_4core_sizes = [x.lower() for x in new_4core_sizes] if size not in new_4core_sizes: compute_client = _compute_client_factory(cli_ctx) sizes = compute_client.virtual_machine_sizes.list(namespace.location) size_info = next((s for s in sizes if s.name.lower() == size), None) if size_info is None or size_info.number_of_cores < 8: return # VMs need to be a supported image in the marketplace # Ubuntu 16.04, SLES 12 SP3, RHEL 7.4, CentOS 7.4, CoreOS Linux, Debian "Stretch" with backports kernel # Oracle Linux 7.4, Windows Server 2016, Windows Server 2012R2 publisher, offer, sku = namespace.os_publisher, namespace.os_offer, namespace.os_sku if not publisher: return publisher, offer, sku = publisher.lower(), offer.lower(), sku.lower() distros = [('canonical', 'UbuntuServer', '^16.04'), ('suse', 'sles', '^12-sp3'), ('redhat', 'rhel', '^7.4'), ('openlogic', 'centos', '^7.4'), ('coreos', 'coreos', None), ('credativ', 'debian', '-backports'), ('oracle', 'oracle-linux', '^7.4'), ('MicrosoftWindowsServer', 'WindowsServer', '^2016'), ('MicrosoftWindowsServer', 'WindowsServer', '^2012-R2')] import re for p, o, s in distros: if p.lower() == publisher and (o is None or o.lower() == offer) and (s is None or re.match(s, sku, re.I)): namespace.accelerated_networking = True def _validate_vmss_create_subnet(namespace): if namespace.vnet_type == 'new': if namespace.subnet_address_prefix is None: cidr = namespace.vnet_address_prefix.split('/', 1)[0] i = 0 for i in range(24, 16, -1): if _subnet_capacity_check(i, namespace.instance_count, not namespace.disable_overprovision): break if i < 16: err = "instance count '{}' is out of range of 2^16 subnet size'" raise CLIError(err.format(namespace.instance_count)) namespace.subnet_address_prefix = '{}/{}'.format(cidr, i) if namespace.app_gateway_type and namespace.app_gateway_subnet_address_prefix is None: namespace.app_gateway_subnet_address_prefix = _get_next_subnet_addr_suffix( namespace.vnet_address_prefix, namespace.subnet_address_prefix, 24) def _get_next_subnet_addr_suffix(vnet_cidr, subnet_cidr, new_mask): def _convert_to_int(address, bit_mask_len): a, b, c, d = [int(x) for x in address.split('.')] result = '{0:08b}{1:08b}{2:08b}{3:08b}'.format(a, b, c, d) return int(result[:-bit_mask_len], 2) error_msg = "usage error: --subnet-address-prefix value should be a subrange of --vnet-address-prefix's" # extract vnet information needed to verify the defaults we are coming out vnet_ip_address, mask = vnet_cidr.split('/') vnet_bit_mask_len = 32 - int(mask) vnet_int = _convert_to_int(vnet_ip_address, vnet_bit_mask_len) subnet_ip_address, mask = subnet_cidr.split('/') subnet_bit_mask_len = 32 - int(mask) if vnet_bit_mask_len <= subnet_bit_mask_len: raise CLIError(error_msg) candidate_int = _convert_to_int(subnet_ip_address, subnet_bit_mask_len) + 1 if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int: # overflows? candidate_int = candidate_int - 2 # try the other way around if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int: raise CLIError(error_msg) # format back to the cidr candaidate_str = '{0:32b}'.format(candidate_int << subnet_bit_mask_len) return '{0}.{1}.{2}.{3}/{4}'.format(int(candaidate_str[0:8], 2), int(candaidate_str[8:16], 2), int(candaidate_str[16:24], 2), int(candaidate_str[24:32], 2), new_mask) def _validate_vm_create_nsg(cmd, namespace): if namespace.nsg: if check_existence(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name, 'Microsoft.Network', 'networkSecurityGroups'): namespace.nsg_type = 'existing' logger.debug("using specified NSG '%s'", namespace.nsg) else: namespace.nsg_type = 'new' logger.debug("specified NSG '%s' not found. It will be created.", namespace.nsg) elif namespace.nsg == '': namespace.nsg_type = None logger.debug('no NSG will be used') elif namespace.nsg is None: namespace.nsg_type = 'new' logger.debug('new NSG will be created') def _validate_vmss_create_nsg(cmd, namespace): if namespace.nsg: namespace.nsg = _get_resource_id(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name, 'networkSecurityGroups', 'Microsoft.Network') def _validate_vm_vmss_create_public_ip(cmd, namespace): if namespace.public_ip_address: if check_existence(cmd.cli_ctx, namespace.public_ip_address, namespace.resource_group_name, 'Microsoft.Network', 'publicIPAddresses'): namespace.public_ip_address_type = 'existing' logger.debug("using existing specified public IP '%s'", namespace.public_ip_address) else: namespace.public_ip_address_type = 'new' logger.debug("specified public IP '%s' not found. It will be created.", namespace.public_ip_address) elif namespace.public_ip_address == '': namespace.public_ip_address_type = None logger.debug('no public IP address will be used') elif namespace.public_ip_address is None: namespace.public_ip_address_type = 'new' logger.debug('new public IP address will be created') # Public-IP SKU is only exposed for VM. VMSS has no such needs so far if getattr(namespace, 'public_ip_sku', None): from azure.cli.core.profiles import ResourceType PublicIPAddressSkuName, IPAllocationMethod = cmd.get_models('PublicIPAddressSkuName', 'IPAllocationMethod', resource_type=ResourceType.MGMT_NETWORK) if namespace.public_ip_sku == PublicIPAddressSkuName.standard.value: if not namespace.public_ip_address_allocation: namespace.public_ip_address_allocation = IPAllocationMethod.static.value def _validate_vmss_create_public_ip(cmd, namespace): if namespace.load_balancer_type is None and namespace.app_gateway_type is None: if namespace.public_ip_address: raise CLIError('--public-ip-address can only be used when creating a new load ' 'balancer or application gateway frontend.') namespace.public_ip_address = '' _validate_vm_vmss_create_public_ip(cmd, namespace) def _validate_vm_create_nics(cmd, namespace): from msrestazure.tools import resource_id from azure.cli.core.commands.client_factory import get_subscription_id nics_value = namespace.nics nics = [] if not nics_value: namespace.nic_type = 'new' logger.debug('new NIC will be created') return if not isinstance(nics_value, list): nics_value = [nics_value] for n in nics_value: nics.append({ 'id': n if '/' in n else resource_id(name=n, resource_group=namespace.resource_group_name, namespace='Microsoft.Network', type='networkInterfaces', subscription=get_subscription_id(cmd.cli_ctx)), 'properties': { 'primary': nics_value[0] == n } }) namespace.nics = nics namespace.nic_type = 'existing' namespace.public_ip_address_type = None logger.debug('existing NIC(s) will be used') def _validate_vm_vmss_create_auth(namespace): if namespace.storage_profile in [StorageProfile.ManagedSpecializedOSDisk, StorageProfile.SASpecializedOSDisk]: return namespace.admin_username = _validate_admin_username(namespace.admin_username, namespace.os_type) if not namespace.os_type: raise CLIError("Unable to resolve OS type. Specify '--os-type' argument.") if not namespace.authentication_type: # apply default auth type (password for Windows, ssh for Linux) by examining the OS type namespace.authentication_type = 'password' \ if (namespace.os_type.lower() == 'windows' or namespace.admin_password) else 'ssh' if namespace.os_type.lower() == 'windows' and namespace.authentication_type == 'ssh': raise CLIError('SSH not supported for Windows VMs.') # validate proper arguments supplied based on the authentication type if namespace.authentication_type == 'password': if namespace.ssh_key_value or namespace.ssh_dest_key_path: raise ValueError( "incorrect usage for authentication-type 'password': " "[--admin-username USERNAME] --admin-password PASSWORD") from knack.prompting import prompt_pass, NoTTYException try: if not namespace.admin_password: namespace.admin_password = prompt_pass('Admin Password: ', confirm=True) except NoTTYException: raise CLIError('Please specify password in non-interactive mode.') # validate password _validate_admin_password(namespace.admin_password, namespace.os_type) elif namespace.authentication_type == 'ssh': if namespace.admin_password: raise ValueError('Admin password cannot be used with SSH authentication type') validate_ssh_key(namespace) if not namespace.ssh_dest_key_path: namespace.ssh_dest_key_path = \ '/home/{}/.ssh/authorized_keys'.format(namespace.admin_username) def _validate_admin_username(username, os_type): import re if not username: raise CLIError("admin user name can not be empty") is_linux = (os_type.lower() == 'linux') # pylint: disable=line-too-long pattern = (r'[\\\/"\[\]:|<>+=;,?*@#()!A-Z]+' if is_linux else r'[\\\/"\[\]:|<>+=;,?*@]+') linux_err = r'admin user name cannot contain upper case character A-Z, special characters \/"[]:|<>+=;,?*@#()! or start with $ or -' win_err = r'admin user name cannot contain special characters \/"[]:|<>+=;,?*@# or ends with .' if re.findall(pattern, username): raise CLIError(linux_err if is_linux else win_err) if is_linux and re.findall(r'^[$-]+', username): raise CLIError(linux_err) if not is_linux and username.endswith('.'): raise CLIError(win_err) disallowed_user_names = [ "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "guest", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5"] if username.lower() in disallowed_user_names: raise CLIError("This user name '{}' meets the general requirements, but is specifically disallowed for this image. Please try a different value.".format(username)) return username def _validate_admin_password(password, os_type): import re is_linux = (os_type.lower() == 'linux') max_length = 72 if is_linux else 123 min_length = 12 if len(password) not in range(min_length, max_length + 1): raise CLIError('The password length must be between {} and {}'.format(min_length, max_length)) contains_lower = re.findall('[a-z]+', password) contains_upper = re.findall('[A-Z]+', password) contains_digit = re.findall('[0-9]+', password) contains_special_char = re.findall(r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password) count = len([x for x in [contains_lower, contains_upper, contains_digit, contains_special_char] if x]) # pylint: disable=line-too-long if count < 3: raise CLIError('Password must have the 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character') def validate_ssh_key(namespace): string_or_file = (namespace.ssh_key_value or os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')) content = string_or_file if os.path.exists(string_or_file): logger.info('Use existing SSH public key file: %s', string_or_file) with open(string_or_file, 'r') as f: content = f.read() elif not keys.is_valid_ssh_rsa_public_key(content): if namespace.generate_ssh_keys: # figure out appropriate file names: # 'base_name'(with private keys), and 'base_name.pub'(with public keys) public_key_filepath = string_or_file if public_key_filepath[-4:].lower() == '.pub': private_key_filepath = public_key_filepath[:-4] else: private_key_filepath = public_key_filepath + '.private' content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath) logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to " "allow SSH access to the VM. If using machines without " "permanent storage, back up your keys to a safe location.", private_key_filepath, public_key_filepath) else: raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. ' 'You can use --generate-ssh-keys to let CLI generate one for you') namespace.ssh_key_value = content def _validate_vm_vmss_msi(cmd, namespace, from_set_command=False): if from_set_command or namespace.assign_identity is not None: identities = namespace.assign_identity or [] from ._vm_utils import MSI_LOCAL_ID for i, _ in enumerate(identities): if identities[i] != MSI_LOCAL_ID: identities[i] = _get_resource_id(cmd.cli_ctx, identities[i], namespace.resource_group_name, 'userAssignedIdentities', 'Microsoft.ManagedIdentity') if not namespace.identity_scope and getattr(namespace.identity_role, 'is_default', None) is None: raise CLIError("usage error: '--role {}' is not applicable as the '--scope' is not provided".format( namespace.identity_role)) user_assigned_identities = [x for x in identities if x != MSI_LOCAL_ID] if user_assigned_identities and not cmd.supported_api_version(min_api='2017-12-01'): raise CLIError('usage error: user assigned identity is only available under profile ' 'with minimum Compute API version of 2017-12-01') if namespace.identity_scope: if identities and MSI_LOCAL_ID not in identities: raise CLIError("usage error: '--scope'/'--role' is only applicable when assign system identity") # keep 'identity_role' for output as logical name is more readable setattr(namespace, 'identity_role_id', _resolve_role_id(cmd.cli_ctx, namespace.identity_role, namespace.identity_scope)) elif namespace.identity_scope or getattr(namespace.identity_role, 'is_default', None) is None: raise CLIError('usage error: --assign-identity [--scope SCOPE] [--role ROLE]') def _resolve_role_id(cli_ctx, role, scope): import re import uuid from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.profiles import ResourceType client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_definitions role_id = None if re.match(r'/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/', role, re.I): role_id = role else: try: uuid.UUID(role) role_id = '/subscriptions/{}/providers/Microsoft.Authorization/roleDefinitions/{}'.format( client.config.subscription_id, role) except ValueError: pass if not role_id: # retrieve role id role_defs = list(client.list(scope, "roleName eq '{}'".format(role))) if not role_defs: raise CLIError("Role '{}' doesn't exist.".format(role)) elif len(role_defs) > 1: ids = [r.id for r in role_defs] err = "More than one role matches the given name '{}'. Please pick an id from '{}'" raise CLIError(err.format(role, ids)) role_id = role_defs[0].id return role_id def process_vm_create_namespace(cmd, namespace): validate_tags(namespace) _validate_location(cmd, namespace, namespace.zone, namespace.size) validate_asg_names_or_ids(cmd, namespace) _validate_vm_create_storage_profile(cmd, namespace) if namespace.storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]: _validate_vm_create_storage_account(cmd, namespace) _validate_vm_create_availability_set(cmd, namespace) _validate_vm_vmss_create_vnet(cmd, namespace) _validate_vm_create_nsg(cmd, namespace) _validate_vm_vmss_create_public_ip(cmd, namespace) _validate_vm_create_nics(cmd, namespace) _validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace) _validate_vm_vmss_create_auth(namespace) if namespace.secrets: _validate_secrets(namespace.secrets, namespace.os_type) if namespace.license_type and namespace.os_type.lower() != 'windows': raise CLIError('usage error: --license-type is only applicable on Windows VM') _validate_vm_vmss_msi(cmd, namespace) if namespace.boot_diagnostics_storage: namespace.boot_diagnostics_storage = get_storage_blob_uri(cmd.cli_ctx, namespace.boot_diagnostics_storage) # endregion # region VMSS Create Validators def _get_default_address_pool(cli_ctx, resource_group, balancer_name, balancer_type): option_name = '--backend-pool-name' client = getattr(get_network_client(cli_ctx), balancer_type, None) if not client: raise CLIError('unrecognized balancer type: {}'.format(balancer_type)) balancer = client.get(resource_group, balancer_name) values = [x.name for x in balancer.backend_address_pools] if len(values) > 1: raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' " "explicitly.".format(option_name, ', '.join(values))) elif not values: raise CLIError("No existing values found for '{0}'. Create one first and try " "again.".format(option_name)) return values[0] def _validate_vmss_single_placement_group(namespace): if namespace.platform_fault_domain_count is not None and namespace.zones is None: raise CLIError('usage error: --platform-fault-domain-count COUNT --zones ZONES') if namespace.zones or namespace.instance_count > 100: if namespace.single_placement_group is None: namespace.single_placement_group = False elif namespace.single_placement_group: raise CLIError("usage error: '--single-placement-group' should be turned off for zonal scale-sets or with" " 100+ instances") def _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace): from msrestazure.azure_exceptions import CloudError from msrestazure.tools import parse_resource_id from azure.cli.core.profiles import ResourceType std_lb_is_available = cmd.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK) if namespace.load_balancer and namespace.application_gateway: raise CLIError('incorrect usage: --load-balancer NAME_OR_ID | ' '--application-gateway NAME_OR_ID') # Resolve the type of balancer (if any) being used balancer_type = 'None' if namespace.load_balancer is None and namespace.application_gateway is None: if std_lb_is_available: balancer_type = 'loadBalancer' else: # needed for Stack profile 2017_03_09 balancer_type = 'loadBalancer' if namespace.single_placement_group is not False else 'applicationGateway' logger.debug("W/o STD LB, defaulting to '%s' under because single placement group is disabled", balancer_type) elif namespace.load_balancer: balancer_type = 'loadBalancer' elif namespace.application_gateway: balancer_type = 'applicationGateway' if balancer_type == 'applicationGateway': if namespace.application_gateway: client = get_network_client(cmd.cli_ctx).application_gateways try: rg = parse_resource_id(namespace.application_gateway).get( 'resource_group', namespace.resource_group_name) ag_name = parse_resource_id(namespace.application_gateway)['name'] client.get(rg, ag_name) namespace.app_gateway_type = 'existing' namespace.backend_pool_name = namespace.backend_pool_name or \ _get_default_address_pool(cmd.cli_ctx, rg, ag_name, 'application_gateways') logger.debug("using specified existing application gateway '%s'", namespace.application_gateway) except CloudError: namespace.app_gateway_type = 'new' logger.debug("application gateway '%s' not found. It will be created.", namespace.application_gateway) elif namespace.application_gateway == '': namespace.app_gateway_type = None logger.debug('no application gateway will be used') elif namespace.application_gateway is None: namespace.app_gateway_type = 'new' logger.debug('new application gateway will be created') # AppGateway frontend required = [] if namespace.app_gateway_type == 'new': required.append('app_gateway_sku') required.append('app_gateway_capacity') if namespace.vnet_type != 'new': required.append('app_gateway_subnet_address_prefix') elif namespace.app_gateway_type == 'existing': required.append('backend_pool_name') forbidden = ['nat_pool_name', 'load_balancer', 'health_probe'] validate_parameter_set(namespace, required, forbidden, description='network balancer: application gateway') elif balancer_type == 'loadBalancer': # LoadBalancer frontend required = [] forbidden = ['app_gateway_subnet_address_prefix', 'application_gateway', 'app_gateway_sku', 'app_gateway_capacity'] validate_parameter_set(namespace, required, forbidden, description='network balancer: load balancer') if namespace.load_balancer: rg = parse_resource_id(namespace.load_balancer).get('resource_group', namespace.resource_group_name) lb_name = parse_resource_id(namespace.load_balancer)['name'] lb = get_network_lb(cmd.cli_ctx, namespace.resource_group_name, lb_name) if lb: namespace.load_balancer_type = 'existing' namespace.backend_pool_name = namespace.backend_pool_name or \ _get_default_address_pool(cmd.cli_ctx, rg, lb_name, 'load_balancers') if not namespace.nat_pool_name: if len(lb.inbound_nat_pools) > 1: raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' explicitly.".format( # pylint: disable=line-too-long '--nat-pool-name', ', '.join([n.name for n in lb.inbound_nat_pools]))) elif not lb.inbound_nat_pools: # Associated scaleset will be missing ssh/rdp, so warn here. logger.warning("No inbound nat pool was configured on '%s'", namespace.load_balancer) else: namespace.nat_pool_name = lb.inbound_nat_pools[0].name logger.debug("using specified existing load balancer '%s'", namespace.load_balancer) else: namespace.load_balancer_type = 'new' logger.debug("load balancer '%s' not found. It will be created.", namespace.load_balancer) elif namespace.load_balancer == '': namespace.load_balancer_type = None logger.debug('no load balancer will be used') elif namespace.load_balancer is None: namespace.load_balancer_type = 'new' logger.debug('new load balancer will be created') if namespace.load_balancer_type == 'new' and namespace.single_placement_group is False and std_lb_is_available: LBSkuName = cmd.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) if namespace.load_balancer_sku is None: namespace.load_balancer_sku = LBSkuName.standard.value logger.debug("use Standard sku as single placement group is turned off") elif namespace.load_balancer_sku == LBSkuName.basic.value: if namespace.zones: err = "'Standard' load balancer is required for zonal scale-sets" elif namespace.instance_count > 100: err = "'Standard' load balancer is required for scale-sets with 100+ instances" else: err = "'Standard' load balancer is required because 'single placement group' is turned off" raise CLIError('usage error:{}'.format(err)) def get_network_client(cli_ctx): from azure.cli.core.profiles import ResourceType from azure.cli.core.commands.client_factory import get_mgmt_service_client return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK, api_version=get_target_network_api(cli_ctx)) def get_network_lb(cli_ctx, resource_group_name, lb_name): from msrestazure.azure_exceptions import CloudError network_client = get_network_client(cli_ctx) try: return network_client.load_balancers.get(resource_group_name, lb_name) except CloudError: return None def process_vmss_create_namespace(cmd, namespace): validate_tags(namespace) if namespace.vm_sku is None: from azure.cli.core.cloud import AZURE_US_GOV_CLOUD if cmd.cli_ctx.cloud.name != AZURE_US_GOV_CLOUD.name: namespace.vm_sku = 'Standard_DS1_v2' else: namespace.vm_sku = 'Standard_D1_v2' _validate_location(cmd, namespace, namespace.zones, namespace.vm_sku) validate_asg_names_or_ids(cmd, namespace) _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=True) _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=True) _validate_vmss_single_placement_group(namespace) _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace) _validate_vmss_create_subnet(namespace) _validate_vmss_create_public_ip(cmd, namespace) _validate_vmss_create_nsg(cmd, namespace) _validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace) _validate_vm_vmss_create_auth(namespace) _validate_vm_vmss_msi(cmd, namespace) if namespace.license_type and namespace.os_type.lower() != 'windows': raise CLIError('usage error: --license-type is only applicable on Windows VM scaleset') if not namespace.public_ip_per_vm and namespace.vm_domain_name: raise CLIError('Usage error: --vm-domain-name can only be used when --public-ip-per-vm is enabled') if namespace.eviction_policy and not namespace.priority: raise CLIError('Usage error: --priority PRIORITY [--eviction-policy POLICY]') # endregion # region disk, snapshot, image validators def validate_vm_disk(cmd, namespace): namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute') def validate_vmss_disk(cmd, namespace): if namespace.disk: namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute') if bool(namespace.disk) == bool(namespace.size_gb): raise CLIError('usage error: --disk EXIST_DISK --instance-id ID | --size-gb GB') elif bool(namespace.disk) != bool(namespace.instance_id): raise CLIError('usage error: --disk EXIST_DISK --instance-id ID') def process_disk_or_snapshot_create_namespace(cmd, namespace): from msrestazure.azure_exceptions import CloudError validate_tags(namespace) if namespace.source: usage_error = 'usage error: --source {SNAPSHOT | DISK} | --source VHD_BLOB_URI [--source-storage-account-id ID]' try: namespace.source_blob_uri, namespace.source_disk, namespace.source_snapshot = _figure_out_storage_source( cmd.cli_ctx, namespace.resource_group_name, namespace.source) if not namespace.source_blob_uri and namespace.source_storage_account_id: raise CLIError(usage_error) except CloudError: raise CLIError(usage_error) def process_image_create_namespace(cmd, namespace): from msrestazure.tools import parse_resource_id from msrestazure.azure_exceptions import CloudError validate_tags(namespace) try: # try capturing from VM, a most common scenario res_id = _get_resource_id(cmd.cli_ctx, namespace.source, namespace.resource_group_name, 'virtualMachines', 'Microsoft.Compute') res = parse_resource_id(res_id) compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription']) vm_info = compute_client.virtual_machines.get(res['resource_group'], res['name']) # pylint: disable=no-member namespace.os_type = vm_info.storage_profile.os_disk.os_type.value namespace.source_virtual_machine = res_id if namespace.data_disk_sources: raise CLIError("'--data-disk-sources' is not allowed when capturing " "images from virtual machines") except CloudError: namespace.os_blob_uri, namespace.os_disk, namespace.os_snapshot = _figure_out_storage_source(cmd.cli_ctx, namespace.resource_group_name, namespace.source) # pylint: disable=line-too-long namespace.data_blob_uris = [] namespace.data_disks = [] namespace.data_snapshots = [] if namespace.data_disk_sources: for data_disk_source in namespace.data_disk_sources: source_blob_uri, source_disk, source_snapshot = _figure_out_storage_source( cmd.cli_ctx, namespace.resource_group_name, data_disk_source) if source_blob_uri: namespace.data_blob_uris.append(source_blob_uri) if source_disk: namespace.data_disks.append(source_disk) if source_snapshot: namespace.data_snapshots.append(source_snapshot) if not namespace.os_type: raise CLIError("usage error: os type is required to create the image, " "please specify '--os-type OS_TYPE'") def _figure_out_storage_source(cli_ctx, resource_group_name, source): from msrestazure.azure_exceptions import CloudError source_blob_uri = None source_disk = None source_snapshot = None if urlparse(source).scheme: # a uri? source_blob_uri = source elif '/disks/' in source.lower(): source_disk = source elif '/snapshots/' in source.lower(): source_snapshot = source else: compute_client = _compute_client_factory(cli_ctx) # pylint: disable=no-member try: info = compute_client.snapshots.get(resource_group_name, source) source_snapshot = info.id except CloudError: info = compute_client.disks.get(resource_group_name, source) source_disk = info.id return (source_blob_uri, source_disk, source_snapshot) def process_disk_encryption_namespace(cmd, namespace): namespace.disk_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.disk_encryption_keyvault, namespace.resource_group_name, 'vaults', 'Microsoft.KeyVault') if namespace.key_encryption_keyvault: if not namespace.key_encryption_key: raise CLIError("Incorrect usage '--key-encryption-keyvault': " "'--key-encryption-key' is required") namespace.key_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.key_encryption_keyvault, namespace.resource_group_name, 'vaults', 'Microsoft.KeyVault') def process_assign_identity_namespace(cmd, namespace): _validate_vm_vmss_msi(cmd, namespace, from_set_command=True) def process_remove_identity_namespace(cmd, namespace): if namespace.identities: from ._vm_utils import MSI_LOCAL_ID for i in range(len(namespace.identities)): if namespace.identities[i] != MSI_LOCAL_ID: namespace.identities[i] = _get_resource_id(cmd.cli_ctx, namespace.identities[i], namespace.resource_group_name, 'userAssignedIdentities', 'Microsoft.ManagedIdentity') # TODO move to its own command module https://github.com/Azure/azure-cli/issues/5105 def process_msi_namespace(cmd, namespace): get_default_location_from_resource_group(cmd, namespace) validate_tags(namespace) def process_gallery_image_version_namespace(cmd, namespace): TargetRegion = cmd.get_models('TargetRegion') if namespace.target_regions: regions_info = [] for t in namespace.target_regions: parts = t.split('=', 1) if len(parts) == 1: regions_info.append(TargetRegion(name=parts[0])) else: try: replica_count = int(parts[1]) except ValueError: raise CLIError("usage error: {}'s replica count must be an integer".format(parts[0])) regions_info.append(TargetRegion(name=parts[0], regional_replica_count=replica_count)) namespace.target_regions = regions_info # endregion
50.486466
195
0.655949
8,089
67,147
5.163555
0.101743
0.023343
0.009696
0.01992
0.430856
0.321107
0.252681
0.188302
0.145518
0.108863
0
0.012417
0.251612
67,147
1,329
196
50.524454
0.818753
0.066555
0
0.204479
0
0.005842
0.195116
0.022917
0
0
0
0.000752
0
1
0.051607
false
0.021422
0.06037
0.000974
0.148978
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a41876d28109b10beeda61ce63d0fb68903a3e0
4,091
py
Python
scraping/faqscraper.py
ednihs-yahska/unibrowser
c91aaf7df8b316c707e5a268f82e789615be9fb8
[ "Apache-2.0" ]
null
null
null
scraping/faqscraper.py
ednihs-yahska/unibrowser
c91aaf7df8b316c707e5a268f82e789615be9fb8
[ "Apache-2.0" ]
null
null
null
scraping/faqscraper.py
ednihs-yahska/unibrowser
c91aaf7df8b316c707e5a268f82e789615be9fb8
[ "Apache-2.0" ]
null
null
null
import re import httplib2 from bs4 import BeautifulSoup from scraping.faqscrapperutil import stripExtra, removeDuplicates, removeBlackListedQuestions, getBlackListedQuestions, convertToJsonList, saveToMongo from scraping.Constants import ENABLE_CUSTOM_QUESTIONS_FILTER, FAQ_LINKS, COLLECTION_NAME def cleanQuestions(questions): questionList = [] for question in questions: questionList.append(stripExtra(question.lstrip().rstrip())) return removeDuplicates(questionList) def getLastAnswer(question, bodyText): start = bodyText.index(question) + len(question) text = bodyText[start : -1].lstrip() # print(text.lstrip()) whitespaceCount = 0 # print(answerLength) for i in range(0, len(text)): # print(answer[i], ' isSpace : ', answer[i].isspace()) if text[i].isspace(): whitespaceCount = whitespaceCount + 1 if whitespaceCount >= 3: # print(0 + i - 3) # print(text[0 : 0 + i - 2]) return text[0 : 0 + i - 2] else : if whitespaceCount != 0: whitespaceCount = 0 def cleanAnswer(answer): answerLength = len(answer) whitespaceCount = 0 # print(answerLength) for i in range(0, answerLength): # print(answer[i], ' isSpace : ', answer[i].isspace()) if answer[i].isspace(): whitespaceCount = whitespaceCount + 1 if whitespaceCount >= 3: # print(0 + i - 3) return answer[0 : 0 + i - 2].lstrip() else : if whitespaceCount != 0: whitespaceCount = 0 return answer.rstrip() def getAnswers(body, questions): bodyText = body.getText() # answerTag = getAnswerTag(body, bodyText, questions) # print(bodyText) questionCount = len(questions) answerList = [] for i in range(0, questionCount): print('Q: ', questions[i]) if i == questionCount - 1: #Last element answer = getLastAnswer(questions[i], bodyText) else : start = bodyText.index(questions[i]) + len(questions[i]) end = bodyText.index(questions[i + 1], start, -1) print("Start : ", start , " End : ", end) soup1 = BeautifulSoup(bodyText[start : end], 'html.parser') # print(soup1) answer = soup1.getText().lstrip() answer = cleanAnswer(answer) answerList.append(answer) print('A: ', answer) return answerList def processWithCustomQuestions(questions): # isCustomQuestionsEnabled = checkConfigForFlag(ENABLE_CUSTOM_QUESTIONS_FILTER) # print("isCustomQuestionsEnabled : ", isCustomQuestionsEnabled) if ENABLE_CUSTOM_QUESTIONS_FILTER == False: return blackListedQuestions = getBlackListedQuestions() removeBlackListedQuestions(questions, blackListedQuestions) print(questions) def getFaqOfLink(link): # print("LINK : ", link) http = httplib2.Http() status, html = http.request(link) soup = BeautifulSoup(html, 'html.parser') body = soup.body questions = cleanQuestions(soup(text=re.compile(r'\s*((?:how|How|Can|can|what|What|where|Where|describe|Describe|Who|who|When|when|Why|why|Should|should|is|Is|I|Do|do|Are|are|Will|will)[^.<>?]*?\s*\?)'))) # print(questions) processWithCustomQuestions(questions) answerList = getAnswers(body, questions) return questions, answerList # link = "https://transportation.oregonstate.edu/aabc/frequently-asked-questions" # questions, answerList = getFaqOfLink(link) if __name__== "__main__": with open(FAQ_LINKS, 'r') as myfile: FAQ_LINKS = myfile.read().split('\n') faqJsonList = [] for i in range(0, len(FAQ_LINKS)): link = FAQ_LINKS[i] questions, answerList = getFaqOfLink(link) jsonList = convertToJsonList(link, questions, answerList) faqJsonList.extend(jsonList) # saveJsonToFile(faqJsonList, "output.txt") saveToMongo(faqJsonList, COLLECTION_NAME)
36.855856
208
0.633341
410
4,091
6.260976
0.282927
0.037398
0.027269
0.017141
0.160499
0.149591
0.112972
0.112972
0.085703
0.050643
0
0.012052
0.249572
4,091
111
209
36.855856
0.824104
0.190174
0
0.168831
0
0.012987
0.062082
0.045648
0
0
0
0
0
1
0.077922
false
0
0.064935
0
0.233766
0.051948
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a425179166f5e46f9936c55196547277fa5770b
2,600
py
Python
messages/term_utils.py
ckousoulis/macos-messages
acf7ac94a81f7d097e2025c6ec7dd429de010795
[ "MIT" ]
null
null
null
messages/term_utils.py
ckousoulis/macos-messages
acf7ac94a81f7d097e2025c6ec7dd429de010795
[ "MIT" ]
null
null
null
messages/term_utils.py
ckousoulis/macos-messages
acf7ac94a81f7d097e2025c6ec7dd429de010795
[ "MIT" ]
null
null
null
"""Terminal utilities specific to message archives. Creates colored text and helps write Messages output. """ from contextlib import contextmanager import itertools import readline FG_COLORS = dict(itertools.chain( zip(("black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", ), range(30, 38)), zip(("bright_black", "bright_red", "bright_green", "bright_yellow", "bright_blue", "bright_magenta", "bright_cyan", "bright_white", ), range(90, 98)))) BG_COLORS = dict((f"on_{key}", val + 10) for key, val in FG_COLORS.items()) ATTRIBUTES = dict( zip(("bold", "faint", "italic", "underline", "slow_blink", "rapid_blink", "reverse", "conceal", "strikethrough", ), range(1, 10))) def colored(text, color=None, on_color=None, attrs=None, escape=False): """Wraps text with ANSI escape codes to achieve the desired look. Args: color: The foreground color. on_color: The background color. attrs: A list of effects. escape: True to escape invisibles (for readline); else False. Returns: A string with the original text wrapped by escape codes. """ def sgr(*codes): return "\x1b[%sm" % ";".join(map(str, codes)) def esc(text): return "\x01%s\x02" % text codes = [] if color: codes.append(FG_COLORS[color]) if on_color: codes.append(BG_COLORS[on_color]) if attrs: codes.extend(ATTRIBUTES[attr] for attr in attrs) if not escape: esc = lambda n: n return "%s%s%s" % (esc(sgr(*codes)), text, esc(sgr(0))) @contextmanager def readline_disabled(): """Context manager to temporarily disable readline features. """ readline.set_auto_history(False) try: yield finally: readline.set_auto_history(True) def confirm(text): """Presents a yes/no prompt to the user and handles replies. Args: text: A message string to present before confirmation. Returns: True if the user confirmed the prompt; else False. """ replies = { "yes": True, "no": False, } prompt = "%s (yes/no): " % colored("Are you sure?", "red", attrs=["bold"], escape=True) reply = "" with readline_disabled(): print(text) while reply not in replies: try: reply = input(prompt).casefold() except (EOFError, KeyboardInterrupt): reply = "no" print(reply) return replies[reply]
23.423423
75
0.589615
313
2,600
4.814696
0.456869
0.01858
0.021234
0.029197
0
0
0
0
0
0
0
0.010177
0.281923
2,600
110
76
23.636364
0.797001
0.253462
0
0.027397
0
0
0.147541
0
0
0
0
0
0
1
0.068493
false
0
0.041096
0.027397
0.164384
0.027397
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4336dbd5d1cefd9e382961486ab2a2e96b55c6
2,714
py
Python
tests/test_subpixel_upsample.py
Project-MONAI/MONAI
2bab12c67c3cc1d54a4847628ce1e879064be11c
[ "Apache-2.0" ]
2,971
2019-10-16T23:53:16.000Z
2022-03-31T20:58:24.000Z
tests/test_subpixel_upsample.py
Project-MONAI/MONAI
2bab12c67c3cc1d54a4847628ce1e879064be11c
[ "Apache-2.0" ]
2,851
2020-01-10T16:23:44.000Z
2022-03-31T22:14:53.000Z
tests/test_subpixel_upsample.py
Project-MONAI/MONAI
2bab12c67c3cc1d54a4847628ce1e879064be11c
[ "Apache-2.0" ]
614
2020-01-14T19:18:01.000Z
2022-03-31T14:06:14.000Z
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch import torch.nn as nn from parameterized import parameterized from monai.networks import eval_mode from monai.networks.blocks import SubpixelUpsample from monai.networks.layers.factories import Conv TEST_CASE_SUBPIXEL = [] for inch in range(1, 5): for dim in range(1, 4): for factor in range(1, 3): test_case = [ {"dimensions": dim, "in_channels": inch, "scale_factor": factor}, (2, inch, *([8] * dim)), (2, inch, *([8 * factor] * dim)), ] TEST_CASE_SUBPIXEL.append(test_case) TEST_CASE_SUBPIXEL_2D_EXTRA = [ {"dimensions": 2, "in_channels": 2, "scale_factor": 3}, (2, 2, 8, 4), # different size for H and W (2, 2, 24, 12), ] TEST_CASE_SUBPIXEL_3D_EXTRA = [ {"dimensions": 3, "in_channels": 1, "scale_factor": 2}, (2, 1, 16, 8, 4), # different size for H, W and D (2, 1, 32, 16, 8), ] conv_block = nn.Sequential( Conv[Conv.CONV, 3](1, 4, kernel_size=1), Conv[Conv.CONV, 3](4, 8, kernel_size=3, stride=1, padding=1) ) TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA = [ {"dimensions": 3, "in_channels": 1, "scale_factor": 2, "conv_block": conv_block}, (2, 1, 16, 8, 4), # different size for H, W and D (2, 1, 32, 16, 8), ] TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA) TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA) TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA) # add every test back with the pad/pool sequential component omitted for tests in list(TEST_CASE_SUBPIXEL): args: dict = tests[0] # type: ignore args = dict(args) args["apply_pad_pool"] = False TEST_CASE_SUBPIXEL.append([args, tests[1], tests[2]]) class TestSUBPIXEL(unittest.TestCase): @parameterized.expand(TEST_CASE_SUBPIXEL) def test_subpixel_shape(self, input_param, input_shape, expected_shape): net = SubpixelUpsample(**input_param) with eval_mode(net): result = net.forward(torch.randn(input_shape)) self.assertEqual(result.shape, expected_shape) if __name__ == "__main__": unittest.main()
34.35443
105
0.687546
402
2,714
4.450249
0.358209
0.071548
0.12521
0.061487
0.226383
0.20123
0.15204
0.130799
0.082728
0.039128
0
0.038638
0.198968
2,714
78
106
34.794872
0.784269
0.26787
0
0.076923
0
0
0.083206
0
0
0
0
0
0.019231
1
0.019231
false
0
0.134615
0
0.173077
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a444601b18de24677a5df5024e0921fdacf4ec7
8,983
py
Python
glue/plugins/export_d3po.py
sergiopasra/glue
c25a217a122a11818382672c99cb21f57a30636f
[ "BSD-3-Clause" ]
1
2019-12-17T07:58:35.000Z
2019-12-17T07:58:35.000Z
glue/plugins/export_d3po.py
sergiopasra/glue
c25a217a122a11818382672c99cb21f57a30636f
[ "BSD-3-Clause" ]
null
null
null
glue/plugins/export_d3po.py
sergiopasra/glue
c25a217a122a11818382672c99cb21f57a30636f
[ "BSD-3-Clause" ]
1
2019-08-04T14:10:12.000Z
2019-08-04T14:10:12.000Z
from __future__ import absolute_import, division, print_function import os import json from glue.core import Subset DISPATCH = {} def save_page(page, page_number, label, subset): """ Convert a tab of a glue session into a D3PO page :param page: Tuple of data viewers to save :param label: Tab label """ result = {} # layout settings result['grid'] = {'nRows': 1, 'nColumns': len(page)} result['name'] = str(label) result['caption'] = 'Generated by Glue' # style settings d = page[0]._data[0] unselected = dict(opacity=d.style.alpha, size=d.style.markersize / 2, color=d.style.color) result['markerStyle'] = dict(unselected=unselected) if subset is not None: s = subset.style selected = dict(opacity=s.alpha, size=s.markersize / 2, color=s.color) result['markerStyle']['selected'] = selected result['selection'] = {'type': 'booleanColumn', 'columnName': 'selection_%i' % page_number} result['histogramStyle'] = result['markerStyle'] # save each plot result['plots'] = list(map(save_plot, page, range(len(page)))) return result def save_plot_base(plot, index): result = {} result['gridPosition'] = [0, index] return result def save_plot(plot, index): typ = type(plot) return DISPATCH[typ](plot, index) def save_scatter(plot, index): """ Convert a single glue scatter plot to a D3PO plot :param plot: Glue scatter plot :class:`~glue.viewers.scatter.qt.ScatterViewer` :param index: 1D index of plot on the page :type index: int :rtype: json-serializable dict """ result = save_plot_base(plot, index) result['type'] = 'scatter' result['xAxis'] = dict(columnName=plot.state.x_att.label, range=[float(plot.state.x_min), float(plot.state.x_max)]) result['yAxis'] = dict(columnName=plot.state.y_att.label, range=[float(plot.state.y_min), float(plot.state.y_max)]) # XXX log scales return result def save_histogram(plot, index): """ Convert a single histogram to a D3PO plot :param plot: Glue histogram :type plot: :class:`~glue.viewers.histogram.qt.HistogramViewer` :param index: 1D index of plot on the page :type index: int :rtype: json-serializable dict """ result = save_plot_base(plot, index) result['type'] = 'histogram' result['xAxis'] = dict(columnName=plot.state.x_att.label, bins=int(plot.state.hist_n_bin), range=[float(plot.state.hist_x_min), float(plot.state.hist_x_max)]) # XXX normed, cumultive, log return result def stage_subsets(application): """ Return a tuple of the subset to use for each stage/tab, or None if the tab has no subset If more than one subset is used per stage/tab, returns None """ result = [] for page in application.viewers: subset = None for viewer in page: for layer_artist in viewer.layers: if not layer_artist.visible: continue s = layer_artist.layer if not isinstance(s, Subset): continue if subset is not None and s is not subset: return None if subset is None: subset = s result.append(subset) return tuple(result) def can_save_d3po(application): """ Check whether an application can be exported to D3PO. Raises an exception if not """ dc = application.session.data_collection if len(dc) != 1: raise ValueError("D3PO Export only supports a single dataset") for tab in application.viewers: for viewer in tab: if not isinstance(viewer, tuple(DISPATCH.keys())): raise ValueError("D3PO Export only supports scatter " "and histogram plots") if sum(len(tab) for tab in application.viewers) == 0: raise ValueError("D3PO Export requires at least one scatterplot " "or histogram") if stage_subsets(application) is None: raise ValueError("D3PO Export restricted to 0 or 1 subsets visible " "in each tab") def make_data_file(data, subsets, path): """ Create the data.csv file, given Data and tuple of subsets """ from astropy.table import Table, Column data_path = os.path.join(path, 'data.csv') t = Table([data[c] for c in data.components], names=[c.label for c in data.components]) for i, subset in enumerate(subsets): if subset is None: continue c = Column(data=subset.to_mask().astype('i'), name='selection_%i' % i) t.add_column(c) t.write(data_path, format='ascii', delimiter=',') def save_d3po(application, path, launch=True): """Save a Glue session to a D3PO bundle. Currently, this has the following restrictions: - The Glue session must have only one dataset open, and 0 or 1 subsets - Only scatter plots or histograms are present - At least one plot is present :param application: Glue appication to save :param path: Path to directory to save in. Will be created if needed """ if os.path.exists(path) and not os.path.isdir(path): os.unlink(path) if not os.path.exists(path): os.mkdir(path) data = application.session.data_collection[0] subsets = stage_subsets(application) viewers = application.viewers # data.csv make_data_file(data, subsets, path) # states.json result = {} result['filename'] = 'data.csv' # XXX don't think this is needed? result['title'] = "Glue export of %s" % data.label result['states'] = list(map(save_page, application.viewers, range(len(viewers)), application.tab_names, subsets)) state_path = os.path.join(path, 'states.json') with open(state_path, 'w') as outfile: json.dump(result, outfile, indent=2, sort_keys=True) # index.html html_path = os.path.join(path, 'index.html') with open(html_path, 'w') as outfile: outfile.write(HTML) # show the result if launch: launch_d3po(path) def launch_d3po(path): """Start a server to view an exported D3PO bundle, and open a browser. :param path: The TLD of the bundle """ from glue.external.six.moves.socketserver import TCPServer from glue.external.six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler from random import randrange from socket import error import webbrowser from threading import Thread os.chdir(path) while True: try: PORT = randrange(8000, 9000) server = TCPServer(("", PORT), SimpleHTTPRequestHandler, False) server.allow_reuse_address = True server.server_bind() break except error: # port already taken pass print('Serving D3PO on port 0.0.0.0:%i' % PORT) server.server_activate() thread = Thread(target=server.serve_forever) thread.setDaemon(True) # do not prevent shutdown thread.start() webbrowser.open('http://0.0.0.0:%i' % PORT) def setup(): from glue.config import exporters exporters.add('D3PO', save_d3po, can_save_d3po, outmode='directory') HTML = """ <!DOCTYPE html> <html> <head> <meta charset="utf-8" /> <link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css"> <link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css"> <link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'> <style> #footer { position: fixed; bottom: 0; right: 0; } </style> <!-- not to be confused with Planet Telex --> <!-- Javscript dependencies --> <script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script> <script src="http://d3po.org/static/js/util.js"></script> <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> <script src="http://d3po.org/static/js/d3po.js"></script> <script src="http://d3po.org/static/js/d3po.init.js"></script> </head> <body> <div id="svg"><svg></svg></div> <div id="controls"> <ul class="navigation"> </ul> </div> <div id="caption"></div> <div id="footer"> More information: <a href="http://d3po.org">d3po.org</a> </div> <script type="text/javascript"> $(document).ready(function() { initialize('states.json', 'data.csv'); } ); </script> </body> </html> """ try: from glue.viewers.scatter.qt import ScatterViewer from glue.viewers.histogram.qt import HistogramViewer except ImportError: pass else: DISPATCH[ScatterViewer] = save_scatter DISPATCH[HistogramViewer] = save_histogram
28.977419
121
0.627296
1,185
8,983
4.693671
0.274262
0.016181
0.015102
0.015282
0.211794
0.140777
0.101402
0.092772
0.086659
0.071197
0
0.012779
0.250807
8,983
309
122
29.071197
0.81367
0.169431
0
0.116402
0
0.031746
0.240259
0.027812
0
0
0
0
0
1
0.058201
false
0.010582
0.079365
0
0.174603
0.010582
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a44c4f1bacc53b31ee5cd71ffc633ea07de715c
5,326
py
Python
src/pyrqlite/connections.py
zmedico/pyrqlite
17a22221e4e796a04c28aa578a93821cc3349b41
[ "MIT" ]
2
2016-04-05T16:16:43.000Z
2016-05-14T12:58:02.000Z
src/pyrqlite/connections.py
zmedico/pyrqlite
17a22221e4e796a04c28aa578a93821cc3349b41
[ "MIT" ]
1
2017-06-04T07:36:45.000Z
2017-06-04T22:57:05.000Z
src/pyrqlite/connections.py
zmedico/pyrqlite
17a22221e4e796a04c28aa578a93821cc3349b41
[ "MIT" ]
1
2016-04-30T20:27:35.000Z
2016-04-30T20:27:35.000Z
from __future__ import unicode_literals import codecs import logging try: from http.client import HTTPConnection, HTTPSConnection except ImportError: # pylint: disable=import-error from httplib import HTTPConnection, HTTPSConnection try: from urllib.parse import urlparse except ImportError: # pylint: disable=import-error from urlparse import urlparse from .constants import ( UNLIMITED_REDIRECTS, ) from .cursors import Cursor from ._ephemeral import EphemeralRqlited as _EphemeralRqlited from .extensions import PARSE_DECLTYPES, PARSE_COLNAMES class Connection(object): from .exceptions import ( Warning, Error, InterfaceError, DatabaseError, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError, ) def __init__(self, scheme='http', host='localhost', port=4001, user=None, password=None, connect_timeout=None, detect_types=0, max_redirects=UNLIMITED_REDIRECTS): self.messages = [] self.scheme = scheme self.host = host self.port = port self._headers = {} if not (user is None or password is None): self._headers['Authorization'] = 'Basic ' + \ codecs.encode('{}:{}'.format(user, password).encode('utf-8'), 'base64').decode('utf-8').rstrip('\n') self.connect_timeout = connect_timeout self.max_redirects = max_redirects self.detect_types = detect_types self.parse_decltypes = detect_types & PARSE_DECLTYPES self.parse_colnames = detect_types & PARSE_COLNAMES self._ephemeral = None if scheme == ':memory:': self._ephemeral = _EphemeralRqlited().__enter__() self.host, self.port = self._ephemeral.http self._connection = self._init_connection() def _init_connection(self): if self.scheme in ('http', ':memory:'): cls = HTTPConnection elif self.scheme == 'https': cls = HTTPSConnection else: raise Connection.ProgrammingError('Unsupported scheme %r' % self.scheme) return cls(self.host, port=self.port, timeout=None if self.connect_timeout is None else float(self.connect_timeout)) def _retry_request(self, method, uri, body=None, headers={}): tries = 10 while tries: tries -= 1 try: self._connection.request(method, uri, body=body, headers=dict(self._headers, **headers)) return self._connection.getresponse() except Exception: if not tries: raise self._connection.close() self._connection = self._init_connection() def _fetch_response(self, method, uri, body=None, headers={}): """ Fetch a response, handling redirection. """ response = self._retry_request(method, uri, body=body, headers=headers) redirects = 0 while response.status == 301 and \ response.getheader('Location') is not None and \ (self.max_redirects == UNLIMITED_REDIRECTS or redirects < self.max_redirects): redirects += 1 uri = response.getheader('Location') location = urlparse(uri) logging.getLogger(__name__).debug("status: %s reason: '%s' location: '%s'", response.status, response.reason, uri) if self.host != location.hostname or self.port != location.port: self._connection.close() self.host = location.hostname self.port = location.port self._connection = self._init_connection() response = self._retry_request(method, uri, body=body, headers=headers) return response def close(self): """Close the connection now (rather than whenever .__del__() is called). The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection. Note that closing a connection without committing the changes first will cause an implicit rollback to be performed.""" self._connection.close() if self._ephemeral is not None: self._ephemeral.__exit__(None, None, None) self._ephemeral = None def __del__(self): self.close() def commit(self): """Database modules that do not support transactions should implement this method with void functionality.""" pass def rollback(self): """This method is optional since not all databases provide transaction support. """ pass def cursor(self, factory=None): """Return a new Cursor Object using the connection.""" if factory: return factory(self) else: return Cursor(self) def execute(self, *args, **kwargs): return self.cursor().execute(*args, **kwargs)
34.141026
97
0.608336
555
5,326
5.677477
0.324324
0.035544
0.020628
0.020946
0.14059
0.134878
0.063472
0.03491
0.03491
0.03491
0
0.00459
0.304544
5,326
155
98
34.36129
0.846112
0.139317
0
0.172727
0
0
0.034707
0
0
0
0
0
0
1
0.090909
false
0.045455
0.127273
0.009091
0.281818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a45566aab7d64963906b912efac019f5bac9c8e
2,079
py
Python
ironic/tests/api/utils.py
citrix-openstack-build/ironic
4b9eed0aeba44739caa742a48b55d824eae8ec55
[ "Apache-2.0" ]
null
null
null
ironic/tests/api/utils.py
citrix-openstack-build/ironic
4b9eed0aeba44739caa742a48b55d824eae8ec55
[ "Apache-2.0" ]
null
null
null
ironic/tests/api/utils.py
citrix-openstack-build/ironic
4b9eed0aeba44739caa742a48b55d824eae8ec55
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utils for testing the API service. """ import datetime import json ADMIN_TOKEN = '4562138218392831' MEMBER_TOKEN = '4562138218392832' class FakeMemcache(object): """Fake cache that is used for keystone tokens lookup.""" _cache = { 'tokens/%s' % ADMIN_TOKEN: { 'access': { 'token': {'id': ADMIN_TOKEN}, 'user': {'id': 'user_id1', 'name': 'user_name1', 'tenantId': '123i2910', 'tenantName': 'mytenant', 'roles': [{'name': 'admin'}] }, } }, 'tokens/%s' % MEMBER_TOKEN: { 'access': { 'token': {'id': MEMBER_TOKEN}, 'user': {'id': 'user_id2', 'name': 'user-good', 'tenantId': 'project-good', 'tenantName': 'goodies', 'roles': [{'name': 'Member'}] } } } } def __init__(self): self.set_key = None self.set_value = None self.token_expiration = None def get(self, key): dt = datetime.datetime.now() + datetime.timedelta(minutes=5) return json.dumps((self._cache.get(key), dt.strftime('%s'))) def set(self, key, value, timeout=None): self.set_value = value self.set_key = key
31.5
78
0.534873
224
2,079
4.875
0.549107
0.054945
0.02381
0.029304
0
0
0
0
0
0
0
0.037281
0.341991
2,079
65
79
31.984615
0.760965
0.338143
0
0.051282
0
0
0.172593
0
0
0
0
0
0
1
0.076923
false
0
0.051282
0
0.205128
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a46bd296a626ee6789a10ae8ade0e121655708c
36,304
py
Python
flytekit/core/workflow.py
milton0825/flytekit
7667a154402d7c02e25006bd6cce926917382a1e
[ "Apache-2.0" ]
null
null
null
flytekit/core/workflow.py
milton0825/flytekit
7667a154402d7c02e25006bd6cce926917382a1e
[ "Apache-2.0" ]
null
null
null
flytekit/core/workflow.py
milton0825/flytekit
7667a154402d7c02e25006bd6cce926917382a1e
[ "Apache-2.0" ]
null
null
null
from __future__ import annotations import collections import inspect from dataclasses import dataclass from enum import Enum from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union from flytekit.common import constants as _common_constants from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException from flytekit.core.base_task import PythonTask from flytekit.core.class_based_resolver import ClassStorageTaskResolver from flytekit.core.condition import ConditionalSection from flytekit.core.context_manager import ( BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities, ) from flytekit.core.interface import ( Interface, transform_inputs_to_parameters, transform_interface_to_typed_interface, transform_signature_to_interface, ) from flytekit.core.launch_plan import LaunchPlan from flytekit.core.node import Node from flytekit.core.promise import ( NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals, ) from flytekit.core.python_auto_container import PythonAutoContainerTask from flytekit.core.reference_entity import ReferenceEntity, WorkflowReference from flytekit.core.type_engine import TypeEngine from flytekit.loggers import logger from flytekit.models import interface as _interface_models from flytekit.models import literals as _literal_models from flytekit.models.core import workflow as _workflow_model GLOBAL_START_NODE = Node( id=_common_constants.GLOBAL_INPUT_NODE_ID, metadata=None, bindings=[], upstream_nodes=[], flyte_entity=None, ) class WorkflowFailurePolicy(Enum): FAIL_IMMEDIATELY = _workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_IMMEDIATELY FAIL_AFTER_EXECUTABLE_NODES_COMPLETE = ( _workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE ) @dataclass class WorkflowMetadata(object): on_failure: WorkflowFailurePolicy def __post_init__(self): if ( self.on_failure != WorkflowFailurePolicy.FAIL_IMMEDIATELY and self.on_failure != WorkflowFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE ): raise FlyteValidationException(f"Failure policy {self.on_failure} not acceptable") def to_flyte_model(self): if self.on_failure == WorkflowFailurePolicy.FAIL_IMMEDIATELY: on_failure = 0 else: on_failure = 1 return _workflow_model.WorkflowMetadata(on_failure=on_failure) @dataclass class WorkflowMetadataDefaults(object): """ This class is similarly named to the one above. Please see the IDL for more information but essentially, this WorkflowMetadataDefaults class represents the defaults that are handed down to a workflow's tasks, whereas WorkflowMetadata represents metadata about the workflow itself. """ interruptible: bool def __post_init__(self): if self.interruptible is not True and self.interruptible is not False: raise FlyteValidationException(f"Interruptible must be boolean, {self.interruptible} invalid") def to_flyte_model(self): return _workflow_model.WorkflowMetadataDefaults(interruptible=self.interruptible) def construct_input_promises(inputs: List[str]): return { input_name: Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name)) for input_name in inputs } def get_promise(binding_data: _literal_models.BindingData, outputs_cache: Dict[Node, Dict[str, Promise]]) -> Promise: """ This is a helper function that will turn a binding into a Promise object, using a lookup map. Please see get_promise_map for the rest of the details. """ if binding_data.promise is not None: if not isinstance(binding_data.promise, NodeOutput): raise FlyteValidationException( f"Binding data Promises have to be of the NodeOutput type {type(binding_data.promise)} found" ) # b.var is the name of the input to the task # binding_data.promise.var is the name of the upstream node's output we want return outputs_cache[binding_data.promise.node][binding_data.promise.var] elif binding_data.scalar is not None: return Promise(var="placeholder", val=_literal_models.Literal(scalar=binding_data.scalar)) elif binding_data.collection is not None: literals = [] for bd in binding_data.collection.bindings: p = get_promise(bd, outputs_cache) literals.append(p.val) return Promise( var="placeholder", val=_literal_models.Literal(collection=_literal_models.LiteralCollection(literals=literals)), ) elif binding_data.map is not None: literals = {} for k, bd in binding_data.map.bindings.items(): p = get_promise(bd, outputs_cache) literals[k] = p.val return Promise( var="placeholder", val=_literal_models.Literal(map=_literal_models.LiteralMap(literals=literals)) ) raise FlyteValidationException("Binding type unrecognized.") def get_promise_map( bindings: List[_literal_models.Binding], outputs_cache: Dict[Node, Dict[str, Promise]] ) -> Dict[str, Promise]: """ Local execution of imperatively defined workflows is done node by node. This function will fill in the node's entity's input arguments, which are specified using the bindings list, and a map of nodes to its outputs. Basically this takes the place of propeller in resolving bindings, pulling in outputs from previously completed nodes and filling in the necessary inputs. """ entity_kwargs = {} for b in bindings: entity_kwargs[b.var] = get_promise(b.binding, outputs_cache) return entity_kwargs class WorkflowBase(object): def __init__( self, name: str, workflow_metadata: WorkflowMetadata, workflow_metadata_defaults: WorkflowMetadataDefaults, python_interface: Interface, **kwargs, ): self._name = name self._workflow_metadata = workflow_metadata self._workflow_metadata_defaults = workflow_metadata_defaults self._python_interface = python_interface self._interface = transform_interface_to_typed_interface(python_interface) self._inputs = {} self._unbound_inputs = set() self._nodes = [] self._output_bindings: Optional[List[_literal_models.Binding]] = [] FlyteEntities.entities.append(self) super().__init__(**kwargs) @property def name(self) -> str: return self._name @property def short_name(self) -> str: return self._name.split(".")[-1] @property def workflow_metadata(self) -> Optional[WorkflowMetadata]: return self._workflow_metadata @property def workflow_metadata_defaults(self): return self._workflow_metadata_defaults @property def python_interface(self) -> Interface: return self._python_interface @property def interface(self) -> _interface_models.TypedInterface: return self._interface @property def output_bindings(self) -> List[_literal_models.Binding]: return self._output_bindings @property def nodes(self) -> List[Node]: return self._nodes def __repr__(self): return ( f"WorkflowBase - {self._name} && " f"Inputs ({len(self._python_interface.inputs)}): {self._python_interface.inputs} && " f"Outputs ({len(self._python_interface.outputs)}): {self._python_interface.outputs} && " f"Output bindings: {self._output_bindings} && " ) def __call__(self, *args, **kwargs): """ The call pattern for Workflows is close to, but not exactly, the call pattern for Tasks. For local execution, it goes __call__ -> _local_execute -> execute From execute, different things happen for the two Workflow styles. For PythonFunctionWorkflows, the Python function is run, for the ImperativeWorkflow, each node is run one at a time. """ if len(args) > 0: raise AssertionError("Only Keyword Arguments are supported for Workflow executions") ctx = FlyteContextManager.current_context() # Get default agruements and override with kwargs passed in input_kwargs = self.python_interface.default_inputs_as_kwargs input_kwargs.update(kwargs) # The first condition is compilation. if ctx.compilation_state is not None: return create_and_link_node(ctx, entity=self, interface=self.python_interface, **input_kwargs) # This condition is hit when this workflow (self) is being called as part of a parent's workflow local run. # The context specifying the local workflow execution has already been set. elif ( ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION ): if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED: if self.python_interface and self.python_interface.output_tuple_name: variables = [k for k in self.python_interface.outputs.keys()] output_tuple = collections.namedtuple(self.python_interface.output_tuple_name, variables) nones = [None for _ in self.python_interface.outputs.keys()] return output_tuple(*nones) else: return None # We are already in a local execution, just continue the execution context return self._local_execute(ctx, **input_kwargs) # Last is starting a local workflow execution else: # Run some sanity checks # Even though the _local_execute call generally expects inputs to be Promises, we don't have to do the # conversion here in this loop. The reason is because we don't prevent users from specifying inputs # as direct scalars, which means there's another Promise-generating loop inside _local_execute too for k, v in input_kwargs.items(): if k not in self.interface.inputs: raise ValueError(f"Received unexpected keyword argument {k}") if isinstance(v, Promise): raise ValueError(f"Received a promise for a workflow call, when expecting a native value for {k}") with FlyteContextManager.with_context( ctx.with_execution_state( ctx.new_execution_state().with_params(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION) ) ) as child_ctx: result = self._local_execute(child_ctx, **input_kwargs) expected_outputs = len(self.python_interface.outputs) if expected_outputs == 0: if result is None or isinstance(result, VoidPromise): return None else: raise Exception(f"Workflow local execution expected 0 outputs but something received {result}") if (1 < expected_outputs == len(result)) or (result is not None and expected_outputs == 1): return create_native_named_tuple(ctx, result, self.python_interface) raise ValueError("expected outputs and actual outputs do not match") def execute(self, **kwargs): raise Exception("Should not be called") def _local_execute(self, ctx: FlyteContext, **kwargs) -> Union[Tuple[Promise], Promise, VoidPromise]: # This is done to support the invariant that Workflow local executions always work with Promise objects # holding Flyte literal values. Even in a wf, a user can call a sub-workflow with a Python native value. for k, v in kwargs.items(): if not isinstance(v, Promise): t = self.python_interface.inputs[k] kwargs[k] = Promise(var=k, val=TypeEngine.to_literal(ctx, v, t, self.interface.inputs[k].type)) # The output of this will always be a combination of Python native values and Promises containing Flyte # Literals. function_outputs = self.execute(**kwargs) # First handle the empty return case. # A workflow function may return a task that doesn't return anything # def wf(): # return t1() # or it may not return at all # def wf(): # t1() # In the former case we get the task's VoidPromise, in the latter we get None if isinstance(function_outputs, VoidPromise) or function_outputs is None: if len(self.python_interface.outputs) != 0: raise FlyteValueException( function_outputs, f"{function_outputs} received but interface has {len(self.python_interface.outputs)} outputs.", ) return VoidPromise(self.name) # Because we should've already returned in the above check, we just raise an error here. if len(self.python_interface.outputs) == 0: raise FlyteValueException( function_outputs, f"{function_outputs} received but should've been VoidPromise or None." ) expected_output_names = list(self.python_interface.outputs.keys()) if len(expected_output_names) == 1: # Here we have to handle the fact that the wf could've been declared with a typing.NamedTuple of # length one. That convention is used for naming outputs - and single-length-NamedTuples are # particularly troublesome but elegant handling of them is not a high priority # Again, we're using the output_tuple_name as a proxy. if self.python_interface.output_tuple_name and isinstance(function_outputs, tuple): wf_outputs_as_map = {expected_output_names[0]: function_outputs[0]} else: wf_outputs_as_map = {expected_output_names[0]: function_outputs} else: wf_outputs_as_map = {expected_output_names[i]: function_outputs[i] for i, _ in enumerate(function_outputs)} # Basically we need to repackage the promises coming from the tasks into Promises that match the workflow's # interface. We do that by extracting out the literals, and creating new Promises wf_outputs_as_literal_dict = translate_inputs_to_literals( ctx, wf_outputs_as_map, flyte_interface_types=self.interface.outputs, native_types=self.python_interface.outputs, ) # Recreate new promises that use the workflow's output names. new_promises = [Promise(var, wf_outputs_as_literal_dict[var]) for var in expected_output_names] return create_task_output(new_promises, self.python_interface) class ImperativeWorkflow(WorkflowBase): def __init__( self, name: str, failure_policy: Optional[WorkflowFailurePolicy] = None, interruptible: Optional[bool] = False, ): metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY) workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible) self._compilation_state = CompilationState(prefix="") self._inputs = {} # This unbound inputs construct is just here to help workflow authors detect issues a bit earlier. It just # keeps track of workflow inputs that you've declared with add_workflow_input but haven't yet consumed. This # is an error that Admin would return at compile time anyways, but this allows flytekit to raise # the error earlier. self._unbound_inputs = set() super().__init__( name=name, workflow_metadata=metadata, workflow_metadata_defaults=workflow_metadata_defaults, python_interface=Interface(), ) @property def compilation_state(self) -> CompilationState: """ Compilation is done a bit at a time, one task or other entity call at a time. This is why this workflow class has to keep track of its own compilation state. """ return self._compilation_state @property def nodes(self) -> List[Node]: return self._compilation_state.nodes @property def inputs(self) -> Dict[str, Promise]: """ This holds the input promises to the workflow. The nodes in these Promise objects should always point to the global start node. """ return self._inputs def __repr__(self): return super().__repr__() + f"Nodes ({len(self.compilation_state.nodes)}): {self.compilation_state.nodes}" def execute(self, **kwargs): """ Called by _local_execute. This function is how local execution for imperative workflows runs. Because when an entity is added using the add_entity function, all inputs to that entity should've been already declared, we can just iterate through the nodes in order and we shouldn't run into any dependency issues. That is, we force the user to declare entities already in a topological sort. To keep track of outputs, we create a map to start things off, filled in only with the workflow inputs (if any). As things are run, their outputs are stored in this map. After all nodes are run, we fill in workflow level outputs the same way as any other previous node. """ if not self.ready(): raise FlyteValidationException(f"Workflow not ready, wf is currently {self}") # Create a map that holds the outputs of each node. intermediate_node_outputs = {GLOBAL_START_NODE: {}} # type: Dict[Node, Dict[str, Promise]] # Start things off with the outputs of the global input node, i.e. the inputs to the workflow. # _local_execute should've already ensured that all the values in kwargs are Promise objects for k, v in kwargs.items(): intermediate_node_outputs[GLOBAL_START_NODE][k] = v # Next iterate through the nodes in order. for node in self.compilation_state.nodes: if node not in intermediate_node_outputs.keys(): intermediate_node_outputs[node] = {} # Retrieve the entity from the node, and call it by looking up the promises the node's bindings require, # and then fill them in using the node output tracker map we have. entity = node.flyte_entity entity_kwargs = get_promise_map(node.bindings, intermediate_node_outputs) # Handle the calling and outputs of each node's entity results = entity(**entity_kwargs) expected_output_names = list(entity.python_interface.outputs.keys()) if isinstance(results, VoidPromise) or results is None: continue # pragma: no cover # Move along, nothing to assign # Because we should've already returned in the above check, we just raise an Exception here. if len(entity.python_interface.outputs) == 0: raise FlyteValueException(results, f"{results} received but should've been VoidPromise or None.") # if there's only one output, if len(expected_output_names) == 1: if entity.python_interface.output_tuple_name and isinstance(results, tuple): intermediate_node_outputs[node][expected_output_names[0]] = results[0] else: intermediate_node_outputs[node][expected_output_names[0]] = results else: if len(results) != len(expected_output_names): raise FlyteValueException(results, f"Different lengths {results} {expected_output_names}") for idx, r in enumerate(results): intermediate_node_outputs[node][expected_output_names[idx]] = r # The rest of this function looks like the above but now we're doing it for the workflow as a whole rather # than just one node at a time. if len(self.python_interface.outputs) == 0: return VoidPromise(self.name) # The values that we return below from the output have to be pulled by fulfilling all of the # workflow's output bindings. # The return style here has to match what 1) what the workflow would've returned had it been declared # functionally, and 2) what a user would return in mock function. That is, if it's a tuple, then it # should be a tuple here, if it's a one element named tuple, then we do a one-element non-named tuple, # if it's a single element then we return a single element if len(self.output_bindings) == 1: # Again use presence of output_tuple_name to understand that we're dealing with a one-element # named tuple if self.python_interface.output_tuple_name: return (get_promise(self.output_bindings[0].binding, intermediate_node_outputs),) # Just a normal single element return get_promise(self.output_bindings[0].binding, intermediate_node_outputs) return tuple([get_promise(b.binding, intermediate_node_outputs) for b in self.output_bindings]) def add_entity(self, entity: Union[PythonTask, LaunchPlan, WorkflowBase], **kwargs) -> Node: """ Anytime you add an entity, all the inputs to the entity must be bound. """ # circular import from flytekit.core.node_creation import create_node ctx = FlyteContext.current_context() if ctx.compilation_state is not None: raise Exception("Can't already be compiling") with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx: n = create_node(entity=entity, **kwargs) def get_input_values(input_value): if isinstance(input_value, list): input_promises = [] for x in input_value: input_promises.extend(get_input_values(x)) return input_promises if isinstance(input_value, dict): input_promises = [] for _, v in input_value.items(): input_promises.extend(get_input_values(v)) return input_promises else: return [input_value] # Every time an entity is added, mark it as used. The above function though will gather all the input # values but we're only interested in the ones that are Promises so let's filter for those. # There's probably a way to clean this up, maybe key off of the name instead of value? all_input_values = get_input_values(kwargs) for input_value in filter(lambda x: isinstance(x, Promise), all_input_values): if input_value in self._unbound_inputs: self._unbound_inputs.remove(input_value) return n def add_workflow_input(self, input_name: str, python_type: Type) -> Interface: """ Adds an input to the workflow. """ if input_name in self._inputs: raise FlyteValidationException(f"Input {input_name} has already been specified for wf {self.name}.") self._python_interface = self._python_interface.with_inputs(extra_inputs={input_name: python_type}) self._interface = transform_interface_to_typed_interface(self._python_interface) self._inputs[input_name] = Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name)) self._unbound_inputs.add(self._inputs[input_name]) return self._inputs[input_name] def add_workflow_output( self, output_name: str, p: Union[Promise, List[Promise], Dict[str, Promise]], python_type: Optional[Type] = None ): """ Add an output with the given name from the given node output. """ if output_name in self._python_interface.outputs: raise FlyteValidationException(f"Output {output_name} already exists in workflow {self.name}") if python_type is None: if type(p) == list or type(p) == dict: raise FlyteValidationException( f"If specifying a list or dict of Promises, you must specify the python_type type for {output_name}" f" starting with the container type (e.g. List[int]" ) python_type = p.ref.node.flyte_entity.python_interface.outputs[p.var] logger.debug(f"Inferring python type for wf output {output_name} from Promise provided {python_type}") flyte_type = TypeEngine.to_literal_type(python_type=python_type) ctx = FlyteContext.current_context() if ctx.compilation_state is not None: raise Exception("Can't already be compiling") with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx: b = binding_from_python_std( ctx, output_name, expected_literal_type=flyte_type, t_value=p, t_value_type=python_type ) self._output_bindings.append(b) self._python_interface = self._python_interface.with_outputs(extra_outputs={output_name: python_type}) self._interface = transform_interface_to_typed_interface(self._python_interface) def add_task(self, task: PythonTask, **kwargs) -> Node: return self.add_entity(task, **kwargs) def add_launch_plan(self, launch_plan: LaunchPlan, **kwargs) -> Node: return self.add_entity(launch_plan, **kwargs) def add_subwf(self, sub_wf: WorkflowBase, **kwargs) -> Node: return self.add_entity(sub_wf, **kwargs) def ready(self) -> bool: """ This function returns whether or not the workflow is in a ready state, which means * Has at least one node * All workflow inputs are bound These conditions assume that all nodes and workflow i/o changes were done with the functions above, which do additional checking. """ if len(self.compilation_state.nodes) == 0: return False if len(self._unbound_inputs) > 0: return False return True class PythonFunctionWorkflow(WorkflowBase, ClassStorageTaskResolver): """ Please read :std:ref:`flyte:divedeep-workflows` first for a high-level understanding of what workflows are in Flyte. This Python object represents a workflow defined by a function and decorated with the :py:func:`@workflow <flytekit.workflow>` decorator. Please see notes on that object for additional information. """ def __init__( self, workflow_function: Callable, metadata: Optional[WorkflowMetadata], default_metadata: Optional[WorkflowMetadataDefaults], ): name = f"{workflow_function.__module__}.{workflow_function.__name__}" self._workflow_function = workflow_function native_interface = transform_signature_to_interface(inspect.signature(workflow_function)) # TODO do we need this - can this not be in launchplan only? # This can be in launch plan only, but is here only so that we don't have to re-evaluate. Or # we can re-evaluate. self._input_parameters = None super().__init__( name=name, workflow_metadata=metadata, workflow_metadata_defaults=default_metadata, python_interface=native_interface, ) @property def function(self): return self._workflow_function def task_name(self, t: PythonAutoContainerTask) -> str: return f"{self.name}.{t.__module__}.{t.name}" def compile(self, **kwargs): """ Supply static Python native values in the kwargs if you want them to be used in the compilation. This mimics a 'closure' in the traditional sense of the word. """ ctx = FlyteContextManager.current_context() self._input_parameters = transform_inputs_to_parameters(ctx, self.python_interface) all_nodes = [] prefix = f"{ctx.compilation_state.prefix}-{self.short_name}-" if ctx.compilation_state is not None else "" with FlyteContextManager.with_context( ctx.with_compilation_state(CompilationState(prefix=prefix, task_resolver=self)) ) as comp_ctx: # Construct the default input promise bindings, but then override with the provided inputs, if any input_kwargs = construct_input_promises([k for k in self.interface.inputs.keys()]) input_kwargs.update(kwargs) workflow_outputs = self._workflow_function(**input_kwargs) all_nodes.extend(comp_ctx.compilation_state.nodes) # This little loop was added as part of the task resolver change. The task resolver interface itself is # more or less stateless (the future-proofing get_all_tasks function notwithstanding). However the # implementation of the TaskResolverMixin that this workflow class inherits from (ClassStorageTaskResolver) # does store state. This loop adds Tasks that are defined within the body of the workflow to the workflow # object itself. for n in comp_ctx.compilation_state.nodes: if isinstance(n.flyte_entity, PythonAutoContainerTask) and n.flyte_entity.task_resolver == self: logger.debug(f"WF {self.name} saving task {n.flyte_entity.name}") self.add(n.flyte_entity) # Iterate through the workflow outputs bindings = [] output_names = list(self.interface.outputs.keys()) # The reason the length 1 case is separate is because the one output might be a list. We don't want to # iterate through the list here, instead we should let the binding creation unwrap it and make a binding # collection/map out of it. if len(output_names) == 1: if isinstance(workflow_outputs, tuple): if len(workflow_outputs) != 1: raise AssertionError( f"The Workflow specification indicates only one return value, received {len(workflow_outputs)}" ) if self.python_interface.output_tuple_name is None: raise AssertionError( "Outputs specification for Workflow does not define a tuple, but return value is a tuple" ) workflow_outputs = workflow_outputs[0] t = self.python_interface.outputs[output_names[0]] b = binding_from_python_std( ctx, output_names[0], self.interface.outputs[output_names[0]].type, workflow_outputs, t, ) bindings.append(b) elif len(output_names) > 1: if not isinstance(workflow_outputs, tuple): raise AssertionError("The Workflow specification indicates multiple return values, received only one") if len(output_names) != len(workflow_outputs): raise Exception(f"Length mismatch {len(output_names)} vs {len(workflow_outputs)}") for i, out in enumerate(output_names): if isinstance(workflow_outputs[i], ConditionalSection): raise AssertionError("A Conditional block (if-else) should always end with an `else_()` clause") t = self.python_interface.outputs[out] b = binding_from_python_std( ctx, out, self.interface.outputs[out].type, workflow_outputs[i], t, ) bindings.append(b) # Save all the things necessary to create an SdkWorkflow, except for the missing project and domain self._nodes = all_nodes self._output_bindings = bindings if not output_names: return None if len(output_names) == 1: return bindings[0] return tuple(bindings) def execute(self, **kwargs): """ This function is here only to try to streamline the pattern between workflows and tasks. Since tasks call execute from dispatch_execute which is in _local_execute, workflows should also call an execute inside _local_execute. This makes mocking cleaner. """ return self._workflow_function(**kwargs) def workflow( _workflow_function=None, failure_policy: Optional[WorkflowFailurePolicy] = None, interruptible: Optional[bool] = False, ): """ This decorator declares a function to be a Flyte workflow. Workflows are declarative entities that construct a DAG of tasks using the data flow between tasks. Unlike a task, the function body of a workflow is evaluated at serialization-time (aka compile-time). This is because while we can determine the entire structure of a task by looking at the function's signature, workflows need to run through the function itself because the body of the function is what expresses the workflow structure. It's also important to note that, local execution notwithstanding, it is not evaluated again when the workflow runs on Flyte. That is, workflows should not call non-Flyte entities since they are only run once (again, this is with respect to the platform, local runs notwithstanding). Please see the :std:doc:`cookbook:sphx_glr_auto_core_flyte_basics_basic_workflow.py` for more usage examples. :param _workflow_function: This argument is implicitly passed and represents the decorated function. :param failure_policy: Use the options in flytekit.WorkflowFailurePolicy :param interruptible: Whether or not tasks launched from this workflow are by default interruptible """ def wrapper(fn): workflow_metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY) workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible) workflow_instance = PythonFunctionWorkflow( fn, metadata=workflow_metadata, default_metadata=workflow_metadata_defaults ) workflow_instance.compile() return workflow_instance if _workflow_function: return wrapper(_workflow_function) else: return wrapper class ReferenceWorkflow(ReferenceEntity, PythonFunctionWorkflow): """ A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface. If at registration time the interface provided causes an issue with compilation, an error will be returned. """ def __init__( self, project: str, domain: str, name: str, version: str, inputs: Dict[str, Type], outputs: Dict[str, Type] ): super().__init__(WorkflowReference(project, domain, name, version), inputs, outputs) def reference_workflow( project: str, domain: str, name: str, version: str, ) -> Callable[[Callable[..., Any]], ReferenceWorkflow]: """ A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface. If at registration time the interface provided causes an issue with compilation, an error will be returned. """ def wrapper(fn) -> ReferenceWorkflow: interface = transform_signature_to_interface(inspect.signature(fn)) return ReferenceWorkflow(project, domain, name, version, interface.inputs, interface.outputs) return wrapper
46.663239
129
0.675518
4,486
36,304
5.289568
0.14222
0.029078
0.028825
0.01534
0.240002
0.198871
0.161069
0.12647
0.106452
0.095495
0
0.001445
0.256804
36,304
777
130
46.723295
0.878025
0.28231
0
0.256566
0
0
0.086263
0.020867
0
0
0
0.001287
0.010101
1
0.088889
false
0
0.048485
0.036364
0.270707
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a47b1a4041d7e6d082433e91d3935c95f8c494b
12,125
py
Python
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_numpy.py
madhukarkm/NeMo
648c97f076147684bee6aaada209f2f20adcaf5d
[ "Apache-2.0" ]
4,145
2019-09-13T08:29:43.000Z
2022-03-31T18:31:44.000Z
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_numpy.py
madhukarkm/NeMo
648c97f076147684bee6aaada209f2f20adcaf5d
[ "Apache-2.0" ]
2,031
2019-09-17T16:51:39.000Z
2022-03-31T23:52:41.000Z
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_numpy.py
madhukarkm/NeMo
648c97f076147684bee6aaada209f2f20adcaf5d
[ "Apache-2.0" ]
1,041
2019-09-13T10:08:21.000Z
2022-03-30T06:37:38.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright 2018-2019, Mingkun Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from torch.autograd import Function, Variable from torch.nn import Module def check_type(var, t, name): if var.dtype is not t: raise TypeError("{} must be {}".format(name, t)) def check_contiguous(var, name): if not var.is_contiguous(): raise ValueError("{} must be contiguous".format(name)) def check_dim(var, dim, name): if len(var.shape) != dim: raise ValueError("{} must be {}D".format(name, dim)) def certify_inputs(log_probs, labels, lengths, label_lengths): # check_type(log_probs, torch.float32, "log_probs") check_type(labels, torch.int32, "labels") check_type(label_lengths, torch.int32, "label_lengths") check_type(lengths, torch.int32, "lengths") check_contiguous(log_probs, "log_probs") check_contiguous(labels, "labels") check_contiguous(label_lengths, "label_lengths") check_contiguous(lengths, "lengths") if lengths.shape[0] != log_probs.shape[0]: raise ValueError( f"Must have a length per example. " f"Given lengths dim: {lengths.shape[0]}, " f"Log probs dim : {log_probs.shape[0]}" ) if label_lengths.shape[0] != log_probs.shape[0]: raise ValueError( "Must have a label length per example. " f"Given label lengths dim : {label_lengths.shape[0]}, " f"Log probs dim : {log_probs.shape[0]}" ) check_dim(log_probs, 4, "log_probs") check_dim(labels, 2, "labels") check_dim(lengths, 1, "lenghts") check_dim(label_lengths, 1, "label_lenghts") max_T = torch.max(lengths) max_U = torch.max(label_lengths) T, U = log_probs.shape[1:3] if T != max_T: raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}") if U != max_U + 1: raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1") def _assert_no_grad(tensor): assert not tensor.requires_grad, ( "gradients only computed for log_probs - please " "mark other tensors as not requiring gradients" ) def forward_pass(log_probs, labels, blank): """ Computes probability of the forward variable alpha. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the forward variable probabilities - alpha of shape [T, U] and the log likelihood of this forward step. """ T, U, _ = log_probs.shape alphas = np.zeros((T, U), dtype='f') for t in range(1, T): alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank] for u in range(1, U): alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]] for t in range(1, T): for u in range(1, U): no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank] emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]] alphas[t, u] = np.logaddexp(emit, no_emit) loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank] return alphas, loglike def backward_pass(log_probs, labels, blank): """ Computes probability of the backward variable beta. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the backward variable probabilities - beta of shape [T, U] and the log likelihood of this backward step. """ T, U, _ = log_probs.shape betas = np.zeros((T, U), dtype='f') betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank] for t in reversed(range(T - 1)): betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank] for u in reversed(range(U - 1)): betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]] for t in reversed(range(T - 1)): for u in reversed(range(U - 1)): no_emit = betas[t + 1, u] + log_probs[t, u, blank] emit = betas[t, u + 1] + log_probs[t, u, labels[u]] betas[t, u] = np.logaddexp(emit, no_emit) return betas, betas[0, 0] def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda): """ Computes the gradients of the log_probs with respect to the log probability of this step occuring. Args: Args: log_probs: Tensor of shape [T, U, V+1] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Tensor of shape [T, U] which represents the backward variable. labels: Labels of shape [B, U] blank: Index of the blank token. Returns: Gradients of shape [T, U, V+1] with respect to the forward log probability """ T, U, _ = log_probs.shape grads = np.full(log_probs.shape, -float("inf")) log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1] # // grad to last blank transition grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1] grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :] # // grad to label transition for u, l in enumerate(labels): grads[:, u, l] = alphas[:, u] + betas[:, u + 1] grads = -np.exp(grads + log_probs - log_like) if fastemit_lambda > 0.0: for u, l in enumerate(labels): grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l] return grads def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda): """ Describes the computation of FastEmit regularization from the paper - [FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148) Args: log_probs: Tensor of shape [T, U, V+1] labels: Unused. Labels of shape [B, U] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Unused. Tensor of shape [T, U] which represents the backward variable. blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: The regularized negative log likelihood - lambda * P˜(At, u|x) """ # General calculation of the fastemit regularization alignments T, U, _ = log_probs.shape # alignment = np.zeros((T, U), dtype='float32') # # for t in range(0, T): # alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1] # # for t in range(0, T): # for u in range(0, U - 1): # emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1] # alignment[t, u] = emit # reg = fastemit_lambda * (alignment[T - 1, U - 1]) # The above is equivalent to below, without need of computing above # reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1]) # The above is also equivalent to below, without need of computing the betas alignment matrix reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]) return -reg def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0): """ Args: log_probs: 3D array with shape [input len, output len + 1, vocab size] labels: 1D array with shape [output time steps] blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: float: The negative log-likelihood 3D array: Gradients with respect to the unnormalized input actications 2d arrays: Alphas matrix (TxU) 2d array: Betas matrix (TxU) """ alphas, ll_forward = forward_pass(log_probs, labels, blank) betas, ll_backward = backward_pass(log_probs, labels, blank) grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda) return -ll_forward, grads, alphas, betas def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0): """ Compute the transducer loss of the batch. Args: log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax. labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning. flen: Length vector of the acoustic sequence. glen: Length vector of the target sequence. blank: Id of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix. """ grads = np.zeros_like(log_probs) costs = [] for b in range(log_probs.shape[0]): t = int(flen[b]) u = int(glen[b]) + 1 ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda) grads[b, :t, :u, :] = g reg = fastemit_regularization( log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda ) ll += reg costs.append(ll) return costs, grads class _RNNT(Function): @staticmethod def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda): costs, grads = transduce_batch( acts.detach().cpu().numpy(), labels.cpu().numpy(), act_lens.cpu().numpy(), label_lens.cpu().numpy(), blank, fastemit_lambda, ) costs = torch.FloatTensor([sum(costs)]) grads = torch.Tensor(grads).to(acts) ctx.grads = grads return costs @staticmethod def backward(ctx, grad_output): return ctx.grads, None, None, None, None, None class RNNTLoss(Module): """ Parameters: `blank_label` (int): default 0 - label index of blank token fastemit_lambda: Float scaling factor for FastEmit regularization. """ def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0): super(RNNTLoss, self).__init__() self.blank = blank self.fastemit_lambda = fastemit_lambda self.rnnt = _RNNT.apply def forward(self, acts, labels, act_lens, label_lens): assert len(labels.size()) == 2 _assert_no_grad(labels) _assert_no_grad(act_lens) _assert_no_grad(label_lens) certify_inputs(acts, labels, act_lens, label_lens) acts = torch.nn.functional.log_softmax(acts, -1) return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda) if __name__ == '__main__': loss = RNNTLoss(fastemit_lambda=0.01) torch.manual_seed(0) acts = torch.randn(1, 2, 5, 3) labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int32) act_lens = torch.tensor([2], dtype=torch.int32) label_lens = torch.tensor([len(labels[0])], dtype=torch.int32) loss_val = loss(acts, labels, act_lens, label_lens)
35.557185
119
0.628784
1,765
12,125
4.215864
0.159207
0.056982
0.008063
0.008063
0.462438
0.42454
0.365005
0.331273
0.324284
0.280742
0
0.020388
0.25567
12,125
340
120
35.661765
0.803989
0.392825
0
0.131579
0
0.006579
0.092391
0.003576
0
0
0
0
0.039474
1
0.098684
false
0.026316
0.026316
0.006579
0.197368
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a48a06f40aea92943e0a0af216f5992d3ce3e19
987
py
Python
dataset_specifications/swirls.py
joeloskarsson/CGAN-regression
1cbcced10d28c11df8500373fc625e5df493f21d
[ "MIT" ]
12
2020-05-22T08:02:27.000Z
2022-03-28T06:14:23.000Z
dataset_specifications/swirls.py
joeloskarsson/CGAN-regression
1cbcced10d28c11df8500373fc625e5df493f21d
[ "MIT" ]
3
2021-06-08T21:33:24.000Z
2022-03-11T07:11:06.000Z
dataset_specifications/swirls.py
joeloskarsson/CGAN-regression
1cbcced10d28c11df8500373fc625e5df493f21d
[ "MIT" ]
null
null
null
import numpy as np import math from dataset_specifications.dataset import Dataset class SwirlsSet(Dataset): def __init__(self): super().__init__() self.name = "swirls" self.n_samples = { "train": 2000, "val": 1000, "test": 1000, } self.y_dim = 2 # 2D heteroskedastic Gaussian mixture model with 2 components def sample_ys(self, xs): n = xs.shape[0] components = np.random.randint(2, size=n) # uniform 0,1 angles = math.pi*components + (math.pi/2.)*xs[:,0] # Angles to centers means = np.stack((np.cos(angles), np.sin(angles)), axis=1) noise = np.random.randn(n, 2) # samples form 2d gaussian std = 0.3 - 0.2*np.abs(xs-1.) ys = means + std*noise return ys def sample(self, n): xs = np.random.uniform(low=0., high=2., size=(n,1)) ys = self.sample_ys(xs) return np.concatenate((xs, ys), axis=1)
25.307692
78
0.563323
138
987
3.934783
0.449275
0.044199
0.022099
0
0
0
0
0
0
0
0
0.047896
0.301925
987
38
79
25.973684
0.740203
0.115502
0
0
0
0
0.020785
0
0
0
0
0
0
1
0.115385
false
0
0.115385
0
0.346154
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a49341d1b3481c67276f3865a9ce768c4be3a18
1,068
py
Python
twitoff/predict.py
dscohen75/twitoff
62d5702e989a6b5fc54aaf9326e240dd63c9fd06
[ "MIT" ]
null
null
null
twitoff/predict.py
dscohen75/twitoff
62d5702e989a6b5fc54aaf9326e240dd63c9fd06
[ "MIT" ]
null
null
null
twitoff/predict.py
dscohen75/twitoff
62d5702e989a6b5fc54aaf9326e240dd63c9fd06
[ "MIT" ]
null
null
null
import numpy as np from sklearn.linear_model import LogisticRegression from .models import User from .twitter import vectorize_tweet def predict_user(user1_name, user2_name, tweet_text): """ Determine and return which user is more likely to say a given Tweet. Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!') Returns 1 corresponding to 1st user passed in, or 0 for second. """ user1 = User.query.filter(User.name == user1_name).one() user2 = User.query.filter(User.name == user2_name).one() user1_vect = np.array([tweet.vect for tweet in user1.tweets]) user2_vect = np.array([tweet.vect for tweet in user2.tweets]) vects = np.vstack([user1_vect, user2_vect]) labels = np.concatenate([np.ones(len(user1.tweets)), np.zeros(len(user2.tweets))]) log_reg = LogisticRegression().fit(vects, labels) # We've done the model fitting, now to predict... hypo_tweet_vect = vectorize_tweet(tweet_text) return log_reg.predict(np.array(hypo_tweet_vect).reshape(1,-1))
41.076923
72
0.699438
155
1,068
4.683871
0.458065
0.049587
0.035813
0.052342
0.146006
0.082645
0.082645
0.082645
0
0
0
0.021915
0.188202
1,068
25
73
42.72
0.815456
0.233146
0
0
0
0
0
0
0
0
0
0
0
1
0.066667
false
0
0.266667
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a49cedf036c8174ae69a9ea00ffa4a94799bd4c
6,028
py
Python
feeder/feeder_ucf.py
George-Polya/st-gcn
e3209796d6de160161063e4c93a00c62b35d3591
[ "BSD-2-Clause" ]
null
null
null
feeder/feeder_ucf.py
George-Polya/st-gcn
e3209796d6de160161063e4c93a00c62b35d3591
[ "BSD-2-Clause" ]
null
null
null
feeder/feeder_ucf.py
George-Polya/st-gcn
e3209796d6de160161063e4c93a00c62b35d3591
[ "BSD-2-Clause" ]
null
null
null
# sys import os import sys import numpy as np import random import pickle import json # torch import torch import torch.nn as nn from torchvision import datasets, transforms # operation from . import tools class Feeder_UCF(torch.utils.data.Dataset): """ Feeder for skeleton-based action recognition in kinetics-skeleton dataset Arguments: data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M) label_path: the path to label random_choose: If true, randomly choose a portion of the input sequence random_shift: If true, randomly pad zeros at the begining or end of sequence random_move: If true, perform randomly but continuously changed transformation to input sequence window_size: The length of the output sequence pose_matching: If ture, match the pose between two frames num_person_in: The number of people the feeder can observe in the input sequence num_person_out: The number of people the feeder in the output sequence debug: If true, only use the first 100 samples """ def __init__(self, data_path, label_path, ignore_empty_sample=True, random_choose=False, random_shift=False, random_move=False, window_size=-1, pose_matching=False, num_person_in=5, num_person_out=2, debug=False): self.debug = debug self.data_path = data_path self.label_path = label_path self.random_choose = random_choose self.random_shift = random_shift self.random_move = random_move self.window_size = window_size self.num_person_in = num_person_in self.num_person_out = num_person_out self.pose_matching = pose_matching self.ignore_empty_sample = ignore_empty_sample self.load_data() def load_data(self): # load file list self.sample_name = os.listdir(self.data_path) if self.debug: self.sample_name = self.sample_name[0:2] # load label label_path = self.label_path with open(label_path) as f: label_info = json.load(f) sample_id = [name.split('.')[0] for name in self.sample_name] self.label = np.array( [label_info[id]['label_index'] for id in sample_id]) has_skeleton = np.array( [label_info[id]['has_skeleton'] for id in sample_id]) # ignore the samples which does not has skeleton sequence if self.ignore_empty_sample: self.sample_name = [ s for h, s in zip(has_skeleton, self.sample_name) if h ] self.label = self.label[has_skeleton] # output data shape (N, C, T, V, M) self.N = len(self.sample_name) #sample self.C = 3 #channel self.T = 90000 #frame self.V = 18 #joint self.M = self.num_person_out #person def __len__(self): return len(self.sample_name) def __iter__(self): return self def __getitem__(self, index): # output shape (C, T, V, M) # get data sample_name = self.sample_name[index] sample_path = os.path.join(self.data_path, sample_name) with open(sample_path, 'r') as f: video_info = json.load(f) # fill data_numpy data_numpy = np.zeros((self.C, self.T, self.V, self.num_person_in)) count = 0 for frame_info in video_info['data']: frame_index = frame_info['frame_index'] for m, skeleton_info in enumerate(frame_info["skeleton"]): if m >= self.num_person_in: break pose = skeleton_info['pose'] score = skeleton_info['score'] frame_index = int(frame_index) # print(frame_index) data_numpy[0, frame_index, :, m] = pose[0::2] data_numpy[1, frame_index, :, m] = pose[1::2] data_numpy[2, frame_index, :, m] = score # count += 1 # print(" ",count, " ") # centralization data_numpy[0:2] = data_numpy[0:2] - 0.5 data_numpy[0][data_numpy[2] == 0] = 0 data_numpy[1][data_numpy[2] == 0] = 0 # get & check label index label = video_info['label_index'] assert (self.label[index] == label) # data augmentation if self.random_shift: data_numpy = tools.random_shift(data_numpy) if self.random_choose: data_numpy = tools.random_choose(data_numpy, self.window_size) elif self.window_size > 0: data_numpy = tools.auto_pading(data_numpy, self.window_size) if self.random_move: data_numpy = tools.random_move(data_numpy) # sort by score sort_index = (-data_numpy[2, :, :, :].sum(axis=1)).argsort(axis=1) for t, s in enumerate(sort_index): data_numpy[:, t, :, :] = data_numpy[:, t, :, s].transpose((1, 2, 0)) data_numpy = data_numpy[:, :, :, 0:self.num_person_out] # match poses between 2 frames if self.pose_matching: data_numpy = tools.openpose_match(data_numpy) return data_numpy, label def top_k(self, score, top_k): assert (all(self.label >= 0)) rank = score.argsort() hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)] return sum(hit_top_k) * 1.0 / len(hit_top_k) def top_k_by_category(self, score, top_k): assert (all(self.label >= 0)) return tools.top_k_by_category(self.label, score, top_k) def calculate_recall_precision(self, score): assert (all(self.label >= 0)) return tools.calculate_recall_precision(self.label, score)
35.251462
104
0.588421
806
6,028
4.167494
0.208437
0.072343
0.037511
0.003572
0.114022
0.04674
0.031259
0.019053
0.019053
0
0
0.013193
0.321002
6,028
170
105
35.458824
0.807476
0.191274
0
0.027027
0
0
0.014176
0
0
0
0
0
0.036036
1
0.072072
false
0
0.09009
0.018018
0.225225
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4a2deaf316f920a36e808eb86cd1a1c9c75edd
2,260
py
Python
tests/common/schema_registry.py
epiphany-platform/cdl-temporary
daa704f379c05d7b733c382058ff88a2549d33d7
[ "Apache-2.0" ]
8
2020-11-04T10:32:03.000Z
2021-09-08T16:46:13.000Z
tests/common/schema_registry.py
epiphany-platform/cdl-temporary
daa704f379c05d7b733c382058ff88a2549d33d7
[ "Apache-2.0" ]
418
2020-11-05T12:43:26.000Z
2021-10-19T02:24:43.000Z
tests/common/schema_registry.py
epiphany-platform/cdl-temporary
daa704f379c05d7b733c382058ff88a2549d33d7
[ "Apache-2.0" ]
13
2020-11-18T12:37:42.000Z
2021-09-16T07:43:27.000Z
import os import subprocess import time import grpc import tests.rpc.proto.schema_registry_pb2 as pb2 import tests.rpc.proto.schema_registry_pb2_grpc as pb2_grpc from tests.common.postgres import PostgresConfig EXE = os.getenv('SCHEMA_REGISTRY_EXE') or 'schema-registry' class SchemaRegistry: def __init__(self, edge_registry_addr, kafka_brokers, postgres_config: PostgresConfig, kafka_group_id='schema_registry', input_port='50101', initial_schema=None): self.edge_registry_addr = edge_registry_addr self.kafka_brokers = kafka_brokers self.kafka_group_id = kafka_group_id self.input_port = input_port self.postgres_config = postgres_config self.initial_schema = initial_schema self.svc = None def start(self): env = { "SCHEMA_REGISTRY_COMMUNICATION_METHOD": 'kafka', "SCHEMA_REGISTRY_KAFKA__BROKERS": self.kafka_brokers, "SCHEMA_REGISTRY_KAFKA__GROUP_ID": self.kafka_group_id, "SCHEMA_REGISTRY_INPUT_PORT": self.input_port, "SCHEMA_REGISTRY_MONITORING__OTEL_SERVICE_NAME": 'schema-registry', "SCHEMA_REGISTRY_MONITORING__STATUS_PORT": '0', "SCHEMA_REGISTRY_SERVICES__EDGE_REGISTRY_URL": self.edge_registry_addr, **self.postgres_config.to_dict("SCHEMA_REGISTRY") } if self.initial_schema is not None: env.update(SCHEMA_REGISTRY_IMPORT_FILE=self.initial_schema) self.svc = subprocess.Popen([EXE], env=env) time.sleep(3) return self def stop(self): self.svc.kill() def create_schema(self, name, destination, query, body, schema_type): with grpc.insecure_channel(f"localhost:{self.input_port}") as channel: stub = pb2_grpc.SchemaRegistryStub(channel) resp = stub.AddSchema( pb2.NewSchema( definition=bytes(body, 'utf-8'), name=name, insert_destination=destination, query_address=query, schema_type=pb2.SchemaType(schema_type=schema_type))) return resp.id
35.873016
83
0.643805
256
2,260
5.320313
0.328125
0.154185
0.044053
0.044053
0.104258
0.104258
0.104258
0
0
0
0
0.009191
0.277876
2,260
62
84
36.451613
0.825368
0
0
0
0
0
0.164602
0.122566
0
0
0
0
0
1
0.076923
false
0
0.153846
0
0.288462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4a74e407a1faa40bc78d3ab5dcb9c6741e4b2e
1,743
py
Python
testsuite/tests/apicast/policy/routing/test_routing_policy_catch_all.py
dlaso99/3scale-tests
b31a3b3596af6d632b393e383c0417ea56bd95ca
[ "Apache-2.0" ]
5
2021-11-04T14:09:24.000Z
2021-12-23T13:48:36.000Z
testsuite/tests/apicast/policy/routing/test_routing_policy_catch_all.py
dlaso99/3scale-tests
b31a3b3596af6d632b393e383c0417ea56bd95ca
[ "Apache-2.0" ]
41
2021-11-03T14:27:21.000Z
2022-03-29T14:46:16.000Z
testsuite/tests/apicast/policy/routing/test_routing_policy_catch_all.py
dlaso99/3scale-tests
b31a3b3596af6d632b393e383c0417ea56bd95ca
[ "Apache-2.0" ]
12
2021-11-03T17:28:31.000Z
2021-11-30T12:28:25.000Z
""" When a routing policy is set with an empty condition, it should be loaded correctly and should route all the requests to a correct backend. """ from urllib.parse import urlparse import pytest from packaging.version import Version # noqa # pylint: disable=unused-import from testsuite import TESTED_VERSION, rawobj # noqa # pylint: disable=unused-import from testsuite.echoed_request import EchoedRequest pytestmark = [ pytest.mark.skipif("TESTED_VERSION < Version('2.11')"), pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-6415")] @pytest.fixture(scope="module") def service_proxy_settings(private_base_url): """ Asserts, that echo api is used as the default backend """ return rawobj.Proxy(private_base_url("echo_api")) @pytest.fixture(scope="module") def service(service, private_base_url): """ Set the routing policy to route all requests to httpbin. (Using the logic that an empty condition should act as a catch all rule) """ proxy = service.proxy.list() proxy.policies.insert(0, rawobj.PolicyConfig( "routing", { "rules": [ { "url": private_base_url("httpbin"), "condition": {}, }]})) return service def test_routing_policy_without_header(api_client, private_base_url): """ Sends a request and asserts, that the routing policy is active and the requests is routed to the correct backend (httpbin) """ parsed_url = urlparse(private_base_url("httpbin")) response = api_client().get("/get") assert response.status_code == 200 echoed_request = EchoedRequest.create(response) assert echoed_request.headers["Host"] == parsed_url.hostname
32.277778
104
0.693058
223
1,743
5.286996
0.457399
0.05598
0.071247
0.039016
0.128923
0.128923
0.071247
0
0
0
0
0.007931
0.204246
1,743
53
105
32.886792
0.842105
0.297762
0
0.071429
0
0
0.125862
0
0
0
0
0
0.071429
1
0.107143
false
0
0.178571
0
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4aca2698c2e4be69222dd4573ceaef0614a5f5
2,113
py
Python
ceilometer/data_processing/notifications.py
vmturbo/ceilometer
f856d3c915b738a64bce14967ba8114fe923c1af
[ "Apache-2.0" ]
null
null
null
ceilometer/data_processing/notifications.py
vmturbo/ceilometer
f856d3c915b738a64bce14967ba8114fe923c1af
[ "Apache-2.0" ]
null
null
null
ceilometer/data_processing/notifications.py
vmturbo/ceilometer
f856d3c915b738a64bce14967ba8114fe923c1af
[ "Apache-2.0" ]
1
2019-09-16T02:11:41.000Z
2019-09-16T02:11:41.000Z
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo.config import cfg import oslo.messaging from ceilometer import plugin from ceilometer import sample OPTS = [ cfg.StrOpt('sahara_control_exchange', default='sahara', help="Exchange name for Data Processing notifications."), ] cfg.CONF.register_opts(OPTS) SERVICE = 'sahara' class DataProcessing(plugin.NotificationBase): resource_name = '%s.cluster' % SERVICE @property def event_types(self): return [ '%s.create' % self.resource_name, '%s.update' % self.resource_name, '%s.delete' % self.resource_name, ] @staticmethod def get_targets(conf): """Return a sequence of oslo.messaging.Target It is defining the exchange and topics to be connected for this plugin. """ return [oslo.messaging.Target(topic=topic, exchange=conf.sahara_control_exchange) for topic in conf.notification_topics] def process_notification(self, message): name = message['event_type'].replace(self.resource_name, 'cluster') project_id = message['payload']['project_id'] user_id = message['_context_user_id'] yield sample.Sample.from_notification( name=name, type=sample.TYPE_DELTA, unit='cluster', volume=1, resource_id=message['payload']['cluster_id'], user_id=user_id, project_id=project_id, message=message)
29.347222
79
0.6531
258
2,113
5.236434
0.496124
0.044412
0.047372
0.023686
0
0
0
0
0
0
0
0.005736
0.257454
2,113
71
80
29.760563
0.855322
0.317085
0
0
0
0
0.137882
0.016347
0
0
0
0
0
1
0.078947
false
0
0.105263
0.026316
0.289474
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4ad309763c3cfd1dc25a625d22290970950e53
946
py
Python
tests/src/Diksha_Reports/usage_by_textbook/download_all_collection_records.py
JalajaTR/cQube
6bf58ab25f0c36709630987ab730bbd5d9192c03
[ "MIT" ]
null
null
null
tests/src/Diksha_Reports/usage_by_textbook/download_all_collection_records.py
JalajaTR/cQube
6bf58ab25f0c36709630987ab730bbd5d9192c03
[ "MIT" ]
2
2022-02-01T00:55:12.000Z
2022-03-29T22:29:09.000Z
tests/src/Diksha_Reports/usage_by_textbook/download_all_collection_records.py
JalajaTR/cQube
6bf58ab25f0c36709630987ab730bbd5d9192c03
[ "MIT" ]
null
null
null
import os import time from selenium.webdriver.support.select import Select from Data.parameters import Data from get_dir import pwd from reuse_func import GetData class All_records_download(): def __init__(self,driver): self.driver = driver self.filename ='' def test_download_csv(self): self.data = GetData() self.p = pwd() self.driver.find_element_by_xpath(Data.hyper_link).click() self.data.page_loading(self.driver) colltype = Select(self.driver.find_element_by_name('collection_type')) colltype.select_by_visible_text(' Overall ') self.data.page_loading(self.driver) self.driver.find_element_by_id(Data.Download).click() time.sleep(4) self.filename = self.p.get_download_dir() + '/collectionType_all_data.csv' time.sleep(2) file = os.path.isfile(self.filename) os.remove(self.filename) return file
28.666667
82
0.684989
126
946
4.904762
0.428571
0.113269
0.067961
0.101942
0.205502
0.093851
0
0
0
0
0
0.002692
0.214588
946
32
83
29.5625
0.829071
0
0
0.08
0
0
0.055085
0.029661
0
0
0
0
0
1
0.08
false
0
0.24
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4baa5bb9eb37f79466bfa86485901ebe84452b
39,528
py
Python
yt_dlp/cookies.py
Naysabots/yt-dlp
bef4697a6a4c225d010125d6ff6dfbfd4fb76e33
[ "Unlicense" ]
null
null
null
yt_dlp/cookies.py
Naysabots/yt-dlp
bef4697a6a4c225d010125d6ff6dfbfd4fb76e33
[ "Unlicense" ]
null
null
null
yt_dlp/cookies.py
Naysabots/yt-dlp
bef4697a6a4c225d010125d6ff6dfbfd4fb76e33
[ "Unlicense" ]
null
null
null
import contextlib import ctypes import json import os import shutil import struct import subprocess import sys import tempfile from datetime import datetime, timedelta, timezone from enum import Enum, auto from hashlib import pbkdf2_hmac from .aes import ( aes_cbc_decrypt_bytes, aes_gcm_decrypt_and_verify_bytes, unpad_pkcs7, ) from .compat import compat_b64decode, compat_cookiejar_Cookie from .minicurses import MultilinePrinter, QuietMultilinePrinter from .utils import Popen, YoutubeDLCookieJar, error_to_str, expand_path try: import sqlite3 SQLITE_AVAILABLE = True except ImportError: # although sqlite3 is part of the standard library, it is possible to compile python without # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544 SQLITE_AVAILABLE = False try: import secretstorage SECRETSTORAGE_AVAILABLE = True except ImportError: SECRETSTORAGE_AVAILABLE = False SECRETSTORAGE_UNAVAILABLE_REASON = ( 'as the `secretstorage` module is not installed. ' 'Please install by running `python3 -m pip install secretstorage`.') except Exception as _err: SECRETSTORAGE_AVAILABLE = False SECRETSTORAGE_UNAVAILABLE_REASON = f'as the `secretstorage` module could not be initialized. {_err}' CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'} SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'} class YDLLogger: def __init__(self, ydl=None): self._ydl = ydl def debug(self, message): if self._ydl: self._ydl.write_debug(message) def info(self, message): if self._ydl: self._ydl.to_screen(f'[Cookies] {message}') def warning(self, message, only_once=False): if self._ydl: self._ydl.report_warning(message, only_once) def error(self, message): if self._ydl: self._ydl.report_error(message) def progress_bar(self): """Return a context manager with a print method. (Optional)""" # Do not print to files/pipes, loggers, or when --no-progress is used if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'): return file = self._ydl._out_files['error'] try: if not file.isatty(): return except BaseException: return printer = MultilinePrinter(file, preserve_output=False) printer.print = lambda message: printer.print_at_line(f'[Cookies] {message}', 0) return printer def _create_progress_bar(logger): if hasattr(logger, 'progress_bar'): printer = logger.progress_bar() if printer: return printer printer = QuietMultilinePrinter() printer.print = lambda _: None return printer def load_cookies(cookie_file, browser_specification, ydl): cookie_jars = [] if browser_specification is not None: browser_name, profile, keyring = _parse_browser_specification(*browser_specification) cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring)) if cookie_file is not None: cookie_file = expand_path(cookie_file) jar = YoutubeDLCookieJar(cookie_file) if os.access(cookie_file, os.R_OK): jar.load(ignore_discard=True, ignore_expires=True) cookie_jars.append(jar) return _merge_cookie_jars(cookie_jars) def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None): if browser_name == 'firefox': return _extract_firefox_cookies(profile, logger) elif browser_name == 'safari': return _extract_safari_cookies(profile, logger) elif browser_name in CHROMIUM_BASED_BROWSERS: return _extract_chrome_cookies(browser_name, profile, keyring, logger) else: raise ValueError(f'unknown browser: {browser_name}') def _extract_firefox_cookies(profile, logger): logger.info('Extracting cookies from firefox') if not SQLITE_AVAILABLE: logger.warning('Cannot extract cookies from firefox without sqlite3 support. ' 'Please use a python interpreter compiled with sqlite3 support') return YoutubeDLCookieJar() if profile is None: search_root = _firefox_browser_dir() elif _is_path(profile): search_root = profile else: search_root = os.path.join(_firefox_browser_dir(), profile) cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger) if cookie_database_path is None: raise FileNotFoundError(f'could not find firefox cookies database in {search_root}') logger.debug(f'Extracting cookies from: "{cookie_database_path}"') with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir: cursor = None try: cursor = _open_database_copy(cookie_database_path, tmpdir) cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies') jar = YoutubeDLCookieJar() with _create_progress_bar(logger) as progress_bar: table = cursor.fetchall() total_cookie_count = len(table) for i, (host, name, value, path, expiry, is_secure) in enumerate(table): progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}') cookie = compat_cookiejar_Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'), path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False, comment=None, comment_url=None, rest={}) jar.set_cookie(cookie) logger.info(f'Extracted {len(jar)} cookies from firefox') return jar finally: if cursor is not None: cursor.connection.close() def _firefox_browser_dir(): if sys.platform in ('linux', 'linux2'): return os.path.expanduser('~/.mozilla/firefox') elif sys.platform == 'win32': return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles') elif sys.platform == 'darwin': return os.path.expanduser('~/Library/Application Support/Firefox') else: raise ValueError(f'unsupported platform: {sys.platform}') def _get_chromium_based_browser_settings(browser_name): # https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md if sys.platform in ('linux', 'linux2'): config = _config_home() browser_dir = { 'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'), 'chrome': os.path.join(config, 'google-chrome'), 'chromium': os.path.join(config, 'chromium'), 'edge': os.path.join(config, 'microsoft-edge'), 'opera': os.path.join(config, 'opera'), 'vivaldi': os.path.join(config, 'vivaldi'), }[browser_name] elif sys.platform == 'win32': appdata_local = os.path.expandvars('%LOCALAPPDATA%') appdata_roaming = os.path.expandvars('%APPDATA%') browser_dir = { 'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'), 'chrome': os.path.join(appdata_local, R'Google\Chrome\User Data'), 'chromium': os.path.join(appdata_local, R'Chromium\User Data'), 'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'), 'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'), 'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'), }[browser_name] elif sys.platform == 'darwin': appdata = os.path.expanduser('~/Library/Application Support') browser_dir = { 'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'), 'chrome': os.path.join(appdata, 'Google/Chrome'), 'chromium': os.path.join(appdata, 'Chromium'), 'edge': os.path.join(appdata, 'Microsoft Edge'), 'opera': os.path.join(appdata, 'com.operasoftware.Opera'), 'vivaldi': os.path.join(appdata, 'Vivaldi'), }[browser_name] else: raise ValueError(f'unsupported platform: {sys.platform}') # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE: # dbus-monitor "interface='org.kde.KWallet'" "type=method_return" keyring_name = { 'brave': 'Brave', 'chrome': 'Chrome', 'chromium': 'Chromium', 'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium', 'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium', 'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome', }[browser_name] browsers_without_profiles = {'opera'} return { 'browser_dir': browser_dir, 'keyring_name': keyring_name, 'supports_profiles': browser_name not in browsers_without_profiles } def _extract_chrome_cookies(browser_name, profile, keyring, logger): logger.info(f'Extracting cookies from {browser_name}') if not SQLITE_AVAILABLE: logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. ' 'Please use a python interpreter compiled with sqlite3 support') return YoutubeDLCookieJar() config = _get_chromium_based_browser_settings(browser_name) if profile is None: search_root = config['browser_dir'] elif _is_path(profile): search_root = profile config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile else: if config['supports_profiles']: search_root = os.path.join(config['browser_dir'], profile) else: logger.error(f'{browser_name} does not support profiles') search_root = config['browser_dir'] cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger) if cookie_database_path is None: raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"') logger.debug(f'Extracting cookies from: "{cookie_database_path}"') decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring) with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir: cursor = None try: cursor = _open_database_copy(cookie_database_path, tmpdir) cursor.connection.text_factory = bytes column_names = _get_column_names(cursor, 'cookies') secure_column = 'is_secure' if 'is_secure' in column_names else 'secure' cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies') jar = YoutubeDLCookieJar() failed_cookies = 0 unencrypted_cookies = 0 with _create_progress_bar(logger) as progress_bar: table = cursor.fetchall() total_cookie_count = len(table) for i, line in enumerate(table): progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}') is_encrypted, cookie = _process_chrome_cookie(decryptor, *line) if not cookie: failed_cookies += 1 continue elif not is_encrypted: unencrypted_cookies += 1 jar.set_cookie(cookie) if failed_cookies > 0: failed_message = f' ({failed_cookies} could not be decrypted)' else: failed_message = '' logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}') counts = decryptor.cookie_counts.copy() counts['unencrypted'] = unencrypted_cookies logger.debug(f'cookie version breakdown: {counts}') return jar finally: if cursor is not None: cursor.connection.close() def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure): host_key = host_key.decode('utf-8') name = name.decode('utf-8') value = value.decode('utf-8') path = path.decode('utf-8') is_encrypted = not value and encrypted_value if is_encrypted: value = decryptor.decrypt(encrypted_value) if value is None: return is_encrypted, None return is_encrypted, compat_cookiejar_Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'), path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False, comment=None, comment_url=None, rest={}) class ChromeCookieDecryptor: """ Overview: Linux: - cookies are either v10 or v11 - v10: AES-CBC encrypted with a fixed key - v11: AES-CBC encrypted with an OS protected key (keyring) - v11 keys can be stored in various places depending on the activate desktop environment [2] Mac: - cookies are either v10 or not v10 - v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux - not v10: 'old data' stored as plaintext Windows: - cookies are either v10 or not v10 - v10: AES-GCM encrypted with a key which is encrypted with DPAPI - not v10: encrypted with DPAPI Sources: - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/ - [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc - KeyStorageLinux::CreateService """ def decrypt(self, encrypted_value): raise NotImplementedError('Must be implemented by sub classes') @property def cookie_counts(self): raise NotImplementedError('Must be implemented by sub classes') def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None): if sys.platform in ('linux', 'linux2'): return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring) elif sys.platform == 'darwin': return MacChromeCookieDecryptor(browser_keyring_name, logger) elif sys.platform == 'win32': return WindowsChromeCookieDecryptor(browser_root, logger) else: raise NotImplementedError(f'Chrome cookie decryption is not supported on this platform: {sys.platform}') class LinuxChromeCookieDecryptor(ChromeCookieDecryptor): def __init__(self, browser_keyring_name, logger, *, keyring=None): self._logger = logger self._v10_key = self.derive_key(b'peanuts') password = _get_linux_keyring_password(browser_keyring_name, keyring, logger) self._v11_key = None if password is None else self.derive_key(password) self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0} @staticmethod def derive_key(password): # values from # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16) @property def cookie_counts(self): return self._cookie_counts def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:] if version == b'v10': self._cookie_counts['v10'] += 1 return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger) elif version == b'v11': self._cookie_counts['v11'] += 1 if self._v11_key is None: self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True) return None return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger) else: self._cookie_counts['other'] += 1 return None class MacChromeCookieDecryptor(ChromeCookieDecryptor): def __init__(self, browser_keyring_name, logger): self._logger = logger password = _get_mac_keyring_password(browser_keyring_name, logger) self._v10_key = None if password is None else self.derive_key(password) self._cookie_counts = {'v10': 0, 'other': 0} @staticmethod def derive_key(password): # values from # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16) @property def cookie_counts(self): return self._cookie_counts def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:] if version == b'v10': self._cookie_counts['v10'] += 1 if self._v10_key is None: self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True) return None return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger) else: self._cookie_counts['other'] += 1 # other prefixes are considered 'old data' which were stored as plaintext # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm return encrypted_value class WindowsChromeCookieDecryptor(ChromeCookieDecryptor): def __init__(self, browser_root, logger): self._logger = logger self._v10_key = _get_windows_v10_key(browser_root, logger) self._cookie_counts = {'v10': 0, 'other': 0} @property def cookie_counts(self): return self._cookie_counts def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:] if version == b'v10': self._cookie_counts['v10'] += 1 if self._v10_key is None: self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True) return None # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc # kNonceLength nonce_length = 96 // 8 # boringssl # EVP_AEAD_AES_GCM_TAG_LEN authentication_tag_length = 16 raw_ciphertext = ciphertext nonce = raw_ciphertext[:nonce_length] ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length] authentication_tag = raw_ciphertext[-authentication_tag_length:] return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger) else: self._cookie_counts['other'] += 1 # any other prefix means the data is DPAPI encrypted # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc return _decrypt_windows_dpapi(encrypted_value, self._logger).decode('utf-8') def _extract_safari_cookies(profile, logger): if profile is not None: logger.error('safari does not support profiles') if sys.platform != 'darwin': raise ValueError(f'unsupported platform: {sys.platform}') cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies') if not os.path.isfile(cookies_path): logger.debug('Trying secondary cookie location') cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies') if not os.path.isfile(cookies_path): raise FileNotFoundError('could not find safari cookies database') with open(cookies_path, 'rb') as f: cookies_data = f.read() jar = parse_safari_cookies(cookies_data, logger=logger) logger.info(f'Extracted {len(jar)} cookies from safari') return jar class ParserError(Exception): pass class DataParser: def __init__(self, data, logger): self._data = data self.cursor = 0 self._logger = logger def read_bytes(self, num_bytes): if num_bytes < 0: raise ParserError(f'invalid read of {num_bytes} bytes') end = self.cursor + num_bytes if end > len(self._data): raise ParserError('reached end of input') data = self._data[self.cursor:end] self.cursor = end return data def expect_bytes(self, expected_value, message): value = self.read_bytes(len(expected_value)) if value != expected_value: raise ParserError(f'unexpected value: {value} != {expected_value} ({message})') def read_uint(self, big_endian=False): data_format = '>I' if big_endian else '<I' return struct.unpack(data_format, self.read_bytes(4))[0] def read_double(self, big_endian=False): data_format = '>d' if big_endian else '<d' return struct.unpack(data_format, self.read_bytes(8))[0] def read_cstring(self): buffer = [] while True: c = self.read_bytes(1) if c == b'\x00': return b''.join(buffer).decode('utf-8') else: buffer.append(c) def skip(self, num_bytes, description='unknown'): if num_bytes > 0: self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}') elif num_bytes < 0: raise ParserError(f'invalid skip of {num_bytes} bytes') def skip_to(self, offset, description='unknown'): self.skip(offset - self.cursor, description) def skip_to_end(self, description='unknown'): self.skip_to(len(self._data), description) def _mac_absolute_time_to_posix(timestamp): return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp()) def _parse_safari_cookies_header(data, logger): p = DataParser(data, logger) p.expect_bytes(b'cook', 'database signature') number_of_pages = p.read_uint(big_endian=True) page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)] return page_sizes, p.cursor def _parse_safari_cookies_page(data, jar, logger): p = DataParser(data, logger) p.expect_bytes(b'\x00\x00\x01\x00', 'page signature') number_of_cookies = p.read_uint() record_offsets = [p.read_uint() for _ in range(number_of_cookies)] if number_of_cookies == 0: logger.debug(f'a cookies page of size {len(data)} has no cookies') return p.skip_to(record_offsets[0], 'unknown page header field') with _create_progress_bar(logger) as progress_bar: for i, record_offset in enumerate(record_offsets): progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}') p.skip_to(record_offset, 'space between records') record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger) p.read_bytes(record_length) p.skip_to_end('space in between pages') def _parse_safari_cookies_record(data, jar, logger): p = DataParser(data, logger) record_size = p.read_uint() p.skip(4, 'unknown record field 1') flags = p.read_uint() is_secure = bool(flags & 0x0001) p.skip(4, 'unknown record field 2') domain_offset = p.read_uint() name_offset = p.read_uint() path_offset = p.read_uint() value_offset = p.read_uint() p.skip(8, 'unknown record field 3') expiration_date = _mac_absolute_time_to_posix(p.read_double()) _creation_date = _mac_absolute_time_to_posix(p.read_double()) # noqa: F841 try: p.skip_to(domain_offset) domain = p.read_cstring() p.skip_to(name_offset) name = p.read_cstring() p.skip_to(path_offset) path = p.read_cstring() p.skip_to(value_offset) value = p.read_cstring() except UnicodeDecodeError: logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True) return record_size p.skip_to(record_size, 'space at the end of the record') cookie = compat_cookiejar_Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'), path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False, comment=None, comment_url=None, rest={}) jar.set_cookie(cookie) return record_size def parse_safari_cookies(data, jar=None, logger=YDLLogger()): """ References: - https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc - this data appears to be out of date but the important parts of the database structure is the same - there are a few bytes here and there which are skipped during parsing """ if jar is None: jar = YoutubeDLCookieJar() page_sizes, body_start = _parse_safari_cookies_header(data, logger) p = DataParser(data[body_start:], logger) for page_size in page_sizes: _parse_safari_cookies_page(p.read_bytes(page_size), jar, logger) p.skip_to_end('footer') return jar class _LinuxDesktopEnvironment(Enum): """ https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h DesktopEnvironment """ OTHER = auto() CINNAMON = auto() GNOME = auto() KDE = auto() PANTHEON = auto() UNITY = auto() XFCE = auto() class _LinuxKeyring(Enum): """ https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h SelectedLinuxBackend """ KWALLET = auto() GNOMEKEYRING = auto() BASICTEXT = auto() SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys() def _get_linux_desktop_environment(env): """ https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc GetDesktopEnvironment """ xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None) desktop_session = env.get('DESKTOP_SESSION', None) if xdg_current_desktop is not None: xdg_current_desktop = xdg_current_desktop.split(':')[0].strip() if xdg_current_desktop == 'Unity': if desktop_session is not None and 'gnome-fallback' in desktop_session: return _LinuxDesktopEnvironment.GNOME else: return _LinuxDesktopEnvironment.UNITY elif xdg_current_desktop == 'GNOME': return _LinuxDesktopEnvironment.GNOME elif xdg_current_desktop == 'X-Cinnamon': return _LinuxDesktopEnvironment.CINNAMON elif xdg_current_desktop == 'KDE': return _LinuxDesktopEnvironment.KDE elif xdg_current_desktop == 'Pantheon': return _LinuxDesktopEnvironment.PANTHEON elif xdg_current_desktop == 'XFCE': return _LinuxDesktopEnvironment.XFCE elif desktop_session is not None: if desktop_session in ('mate', 'gnome'): return _LinuxDesktopEnvironment.GNOME elif 'kde' in desktop_session: return _LinuxDesktopEnvironment.KDE elif 'xfce' in desktop_session: return _LinuxDesktopEnvironment.XFCE else: if 'GNOME_DESKTOP_SESSION_ID' in env: return _LinuxDesktopEnvironment.GNOME elif 'KDE_FULL_SESSION' in env: return _LinuxDesktopEnvironment.KDE return _LinuxDesktopEnvironment.OTHER def _choose_linux_keyring(logger): """ https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc SelectBackend """ desktop_environment = _get_linux_desktop_environment(os.environ) logger.debug(f'detected desktop environment: {desktop_environment.name}') if desktop_environment == _LinuxDesktopEnvironment.KDE: linux_keyring = _LinuxKeyring.KWALLET elif desktop_environment == _LinuxDesktopEnvironment.OTHER: linux_keyring = _LinuxKeyring.BASICTEXT else: linux_keyring = _LinuxKeyring.GNOMEKEYRING return linux_keyring def _get_kwallet_network_wallet(logger): """ The name of the wallet used to store network passwords. https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc KWalletDBus::NetworkWallet which does a dbus call to the following function: https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html Wallet::NetworkWallet """ default_wallet = 'kdewallet' try: proc = Popen([ 'dbus-send', '--session', '--print-reply=literal', '--dest=org.kde.kwalletd5', '/modules/kwalletd5', 'org.kde.KWallet.networkWallet' ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) stdout, stderr = proc.communicate_or_kill() if proc.returncode != 0: logger.warning('failed to read NetworkWallet') return default_wallet else: network_wallet = stdout.decode('utf-8').strip() logger.debug(f'NetworkWallet = "{network_wallet}"') return network_wallet except Exception as e: logger.warning(f'exception while obtaining NetworkWallet: {e}') return default_wallet def _get_kwallet_password(browser_keyring_name, logger): logger.debug('using kwallet-query to obtain password from kwallet') if shutil.which('kwallet-query') is None: logger.error('kwallet-query command not found. KWallet and kwallet-query ' 'must be installed to read from KWallet. kwallet-query should be' 'included in the kwallet package for your distribution') return b'' network_wallet = _get_kwallet_network_wallet(logger) try: proc = Popen([ 'kwallet-query', '--read-password', f'{browser_keyring_name} Safe Storage', '--folder', f'{browser_keyring_name} Keys', network_wallet ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) stdout, stderr = proc.communicate_or_kill() if proc.returncode != 0: logger.error(f'kwallet-query failed with return code {proc.returncode}. Please consult ' 'the kwallet-query man page for details') return b'' else: if stdout.lower().startswith(b'failed to read'): logger.debug('failed to read password from kwallet. Using empty string instead') # this sometimes occurs in KDE because chrome does not check hasEntry and instead # just tries to read the value (which kwallet returns "") whereas kwallet-query # checks hasEntry. To verify this: # dbus-monitor "interface='org.kde.KWallet'" "type=method_return" # while starting chrome. # this may be a bug as the intended behaviour is to generate a random password and store # it, but that doesn't matter here. return b'' else: logger.debug('password found') if stdout[-1:] == b'\n': stdout = stdout[:-1] return stdout except Exception as e: logger.warning(f'exception running kwallet-query: {error_to_str(e)}') return b'' def _get_gnome_keyring_password(browser_keyring_name, logger): if not SECRETSTORAGE_AVAILABLE: logger.error(f'secretstorage not available {SECRETSTORAGE_UNAVAILABLE_REASON}') return b'' # the Gnome keyring does not seem to organise keys in the same way as KWallet, # using `dbus-monitor` during startup, it can be observed that chromium lists all keys # and presumably searches for its key in the list. It appears that we must do the same. # https://github.com/jaraco/keyring/issues/556 with contextlib.closing(secretstorage.dbus_init()) as con: col = secretstorage.get_default_collection(con) for item in col.get_all_items(): if item.get_label() == f'{browser_keyring_name} Safe Storage': return item.get_secret() else: logger.error('failed to read from keyring') return b'' def _get_linux_keyring_password(browser_keyring_name, keyring, logger): # note: chrome/chromium can be run with the following flags to determine which keyring backend # it has chosen to use # chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_ # Chromium supports a flag: --password-store=<basic|gnome|kwallet> so the automatic detection # will not be sufficient in all cases. keyring = _LinuxKeyring[keyring] if keyring else _choose_linux_keyring(logger) logger.debug(f'Chosen keyring: {keyring.name}') if keyring == _LinuxKeyring.KWALLET: return _get_kwallet_password(browser_keyring_name, logger) elif keyring == _LinuxKeyring.GNOMEKEYRING: return _get_gnome_keyring_password(browser_keyring_name, logger) elif keyring == _LinuxKeyring.BASICTEXT: # when basic text is chosen, all cookies are stored as v10 (so no keyring password is required) return None assert False, f'Unknown keyring {keyring}' def _get_mac_keyring_password(browser_keyring_name, logger): logger.debug('using find-generic-password to obtain password from OSX keychain') try: proc = Popen( ['security', 'find-generic-password', '-w', # write password to stdout '-a', browser_keyring_name, # match 'account' '-s', f'{browser_keyring_name} Safe Storage'], # match 'service' stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) stdout, stderr = proc.communicate_or_kill() if stdout[-1:] == b'\n': stdout = stdout[:-1] return stdout except Exception as e: logger.warning(f'exception running find-generic-password: {error_to_str(e)}') return None def _get_windows_v10_key(browser_root, logger): path = _find_most_recently_used_file(browser_root, 'Local State', logger) if path is None: logger.error('could not find local state file') return None logger.debug(f'Found local state file at "{path}"') with open(path, encoding='utf8') as f: data = json.load(f) try: base64_key = data['os_crypt']['encrypted_key'] except KeyError: logger.error('no encrypted key in Local State') return None encrypted_key = compat_b64decode(base64_key) prefix = b'DPAPI' if not encrypted_key.startswith(prefix): logger.error('invalid key') return None return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger) def pbkdf2_sha1(password, salt, iterations, key_length): return pbkdf2_hmac('sha1', password, salt, iterations, key_length) def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16): plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector)) try: return plaintext.decode('utf-8') except UnicodeDecodeError: logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True) return None def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger): try: plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce) except ValueError: logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True) return None try: return plaintext.decode('utf-8') except UnicodeDecodeError: logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True) return None def _decrypt_windows_dpapi(ciphertext, logger): """ References: - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata """ from ctypes.wintypes import DWORD class DATA_BLOB(ctypes.Structure): _fields_ = [('cbData', DWORD), ('pbData', ctypes.POINTER(ctypes.c_char))] buffer = ctypes.create_string_buffer(ciphertext) blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer) blob_out = DATA_BLOB() ret = ctypes.windll.crypt32.CryptUnprotectData( ctypes.byref(blob_in), # pDataIn None, # ppszDataDescr: human readable description of pDataIn None, # pOptionalEntropy: salt? None, # pvReserved: must be NULL None, # pPromptStruct: information about prompts to display 0, # dwFlags ctypes.byref(blob_out) # pDataOut ) if not ret: logger.warning('failed to decrypt with DPAPI', only_once=True) return None result = ctypes.string_at(blob_out.pbData, blob_out.cbData) ctypes.windll.kernel32.LocalFree(blob_out.pbData) return result def _config_home(): return os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')) def _open_database_copy(database_path, tmpdir): # cannot open sqlite databases if they are already in use (e.g. by the browser) database_copy_path = os.path.join(tmpdir, 'temporary.sqlite') shutil.copy(database_path, database_copy_path) conn = sqlite3.connect(database_copy_path) return conn.cursor() def _get_column_names(cursor, table_name): table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall() return [row[1].decode('utf-8') for row in table_info] def _find_most_recently_used_file(root, filename, logger): # if there are multiple browser profiles, take the most recently used one i, paths = 0, [] with _create_progress_bar(logger) as progress_bar: for curr_root, dirs, files in os.walk(root): for file in files: i += 1 progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched') if file == filename: paths.append(os.path.join(curr_root, file)) return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime) def _merge_cookie_jars(jars): output_jar = YoutubeDLCookieJar() for jar in jars: for cookie in jar: output_jar.set_cookie(cookie) if jar.filename is not None: output_jar.filename = jar.filename return output_jar def _is_path(value): return os.path.sep in value def _parse_browser_specification(browser_name, profile=None, keyring=None): if browser_name not in SUPPORTED_BROWSERS: raise ValueError(f'unsupported browser: "{browser_name}"') if keyring not in (None, *SUPPORTED_KEYRINGS): raise ValueError(f'unsupported keyring: "{keyring}"') if profile is not None and _is_path(profile): profile = os.path.expanduser(profile) return browser_name, profile, keyring
39.646941
134
0.664921
4,837
39,528
5.223279
0.143891
0.008549
0.008708
0.014407
0.426598
0.360736
0.304255
0.275519
0.227271
0.189155
0
0.00875
0.236718
39,528
996
135
39.686747
0.828643
0.121711
0
0.324622
0
0.004127
0.167538
0.023731
0
0
0.000174
0
0.001376
1
0.085282
false
0.033012
0.028886
0.01238
0.277854
0.019257
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4c2b7fd420450ab300e48488a392b1a4cf50ef
1,205
py
Python
hgtools/tests/conftest.py
jaraco/hgtools
1090d139e5dbdab864da8f1917a9e674331b6f9b
[ "MIT" ]
1
2017-05-17T15:12:29.000Z
2017-05-17T15:12:29.000Z
hgtools/tests/conftest.py
jaraco/hgtools
1090d139e5dbdab864da8f1917a9e674331b6f9b
[ "MIT" ]
12
2016-01-01T14:43:44.000Z
2021-10-03T02:13:19.000Z
hgtools/tests/conftest.py
jaraco/hgtools
1090d139e5dbdab864da8f1917a9e674331b6f9b
[ "MIT" ]
null
null
null
import os import pytest from hgtools import managers def _ensure_present(mgr): try: mgr.version() except Exception: pytest.skip() @pytest.fixture def tmpdir_as_cwd(tmpdir): with tmpdir.as_cwd(): yield tmpdir @pytest.fixture def hg_repo(tmpdir_as_cwd): mgr = managers.MercurialManager() _ensure_present(mgr) mgr._invoke('init', '.') os.makedirs('bar') touch('bar/baz') mgr._invoke('addremove') mgr._invoke('ci', '-m', 'committed') with open('bar/baz', 'w') as baz: baz.write('content') mgr._invoke('ci', '-m', 'added content') return tmpdir_as_cwd @pytest.fixture def git_repo(tmpdir_as_cwd): mgr = managers.GitManager() _ensure_present(mgr) mgr._invoke('init') mgr._invoke('config', 'user.email', '[email protected]') mgr._invoke('config', 'user.name', 'HGTools') os.makedirs('bar') touch('bar/baz') mgr._invoke('add', '.') mgr._invoke('commit', '-m', 'committed') with open('bar/baz', 'w') as baz: baz.write('content') mgr._invoke('commit', '-am', 'added content') return tmpdir_as_cwd def touch(filename): with open(filename, 'a'): pass
21.517857
62
0.624066
156
1,205
4.634615
0.346154
0.124481
0.091286
0.041494
0.473029
0.473029
0.240664
0.240664
0.149378
0.149378
0
0
0.209129
1,205
55
63
21.909091
0.758657
0
0
0.348837
0
0
0.165145
0
0
0
0
0
0
1
0.116279
false
0.023256
0.069767
0
0.232558
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a4fee7da31280c4ead726e734baac5bb3fc023e
1,227
py
Python
setup.py
dantas/wifi
e9cd6df7d3411f1532843999f6c33f45369c3fe4
[ "BSD-2-Clause" ]
1
2019-04-29T14:57:45.000Z
2019-04-29T14:57:45.000Z
setup.py
dantas/wifi
e9cd6df7d3411f1532843999f6c33f45369c3fe4
[ "BSD-2-Clause" ]
null
null
null
setup.py
dantas/wifi
e9cd6df7d3411f1532843999f6c33f45369c3fe4
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python from setuptools import setup import os __doc__ = """ Command line tool and library wrappers around iwlist and /etc/network/interfaces. """ def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() install_requires = [ 'setuptools', 'pbkdf2', ] try: import argparse except: install_requires.append('argparse') version = '1.0.0' setup( name='wifi', version=version, author='Rocky Meza, Gavin Wahl', author_email='[email protected]', description=__doc__, long_description=read('README.rst'), packages=['wifi'], scripts=['bin/wifi'], test_suite='tests', platforms=["Debian"], license='BSD', install_requires=install_requires, classifiers=[ "License :: OSI Approved :: BSD License", "Topic :: System :: Networking", "Operating System :: POSIX :: Linux", "Environment :: Console", "Programming Language :: Python", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", ], data_files=[ ('/etc/bash_completion.d/', ['extras/wifi-completion.bash']), ] )
23.150943
70
0.625102
134
1,227
5.567164
0.649254
0.080429
0.134048
0.069705
0
0
0
0
0
0
0
0.010449
0.220049
1,227
52
71
23.596154
0.76907
0.0163
0
0
0
0
0.420398
0.06136
0
0
0
0
0
1
0.022727
false
0
0.068182
0.022727
0.113636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5020fde45eeb5e84a7966a0bf40c59df2eeca7
2,653
py
Python
auth-api/src/auth_api/resources/org_products.py
severinbeauvais/sbc-auth
c98f75ea8970a357c62093b6e9f7deab61ae87c5
[ "Apache-2.0" ]
null
null
null
auth-api/src/auth_api/resources/org_products.py
severinbeauvais/sbc-auth
c98f75ea8970a357c62093b6e9f7deab61ae87c5
[ "Apache-2.0" ]
null
null
null
auth-api/src/auth_api/resources/org_products.py
severinbeauvais/sbc-auth
c98f75ea8970a357c62093b6e9f7deab61ae87c5
[ "Apache-2.0" ]
null
null
null
# Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """API endpoints for managing an Org resource.""" from flask import request from flask_restplus import Namespace, Resource, cors from auth_api import status as http_status from auth_api.exceptions import BusinessException from auth_api.jwt_wrapper import JWTWrapper from auth_api.schemas import ProductSubscriptionSchema from auth_api.schemas import utils as schema_utils from auth_api.services import Product as ProductService from auth_api.tracer import Tracer from auth_api.utils.roles import Role from auth_api.utils.util import cors_preflight API = Namespace('products', description='Endpoints for products management') TRACER = Tracer.get_instance() _JWT = JWTWrapper.get_instance() @cors_preflight('GET,POST,OPTIONS') @API.route('', methods=['GET', 'POST', 'OPTIONS']) class OrgProducts(Resource): """Resource for managing product subscriptions.""" @staticmethod @TRACER.trace() @cors.crossdomain(origin='*') @_JWT.has_one_of_roles([Role.STAFF_CREATE_ACCOUNTS.value]) def post(org_id): """Post a new product subscription to the org using the request body.""" request_json = request.get_json() valid_format, errors = schema_utils.validate(request_json, 'org_product_subscription') if not valid_format: return {'message': schema_utils.serialize(errors)}, http_status.HTTP_400_BAD_REQUEST try: subscriptions = ProductService.create_product_subscription(org_id, request_json) if subscriptions is None: response, status = {'message': 'Not authorized to perform this action'}, \ http_status.HTTP_401_UNAUTHORIZED else: response, status = {'subscriptions': ProductSubscriptionSchema().dump(subscriptions, many=True)}, \ http_status.HTTP_201_CREATED except BusinessException as exception: response, status = {'code': exception.code, 'message': exception.message}, exception.status_code return response, status
42.790323
115
0.724463
335
2,653
5.59403
0.447761
0.03842
0.052828
0.017076
0.025614
0
0
0
0
0
0
0.007948
0.193743
2,653
61
116
43.491803
0.867695
0.272899
0
0
0
0
0.089953
0.012625
0
0
0
0
0
1
0.027027
false
0
0.297297
0
0.405405
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a507bf3c4b912bd7ed181d618b52de6b0f464f2
3,749
py
Python
TWLight/applications/management/commands/send_coordinator_reminders.py
nicole331/TWLight
fab9002e76868f8a2ef36f9279c777de34243b2c
[ "MIT" ]
67
2017-12-14T22:27:48.000Z
2022-03-13T18:21:31.000Z
TWLight/applications/management/commands/send_coordinator_reminders.py
nicole331/TWLight
fab9002e76868f8a2ef36f9279c777de34243b2c
[ "MIT" ]
433
2017-03-24T22:51:23.000Z
2022-03-31T19:36:22.000Z
TWLight/applications/management/commands/send_coordinator_reminders.py
Mahuton/TWLight
90b299d07b0479f21dc90e17b8d05f5a221b0de1
[ "MIT" ]
105
2017-06-23T03:53:41.000Z
2022-03-30T17:24:29.000Z
import logging from collections import Counter from django.core.management.base import BaseCommand from django.db.models import Q from TWLight.applications.models import Application from TWLight.resources.models import Partner from TWLight.applications.signals import Reminder from TWLight.users.models import Editor logger = logging.getLogger(__name__) class Command(BaseCommand): def handle(self, *args, **options): # This is not DRY. Originally, this pulled the queryset from # TWLight.applications.views.ListApplicationsView.get_queryset(). # But that now expects a request object. So, we did a copy/paste. # We're actually getting apps with a status of PENDING or QUESTION # or APPROVED, and their corresponding user preferences being True # for partners with a status of AVAILABLE. all_apps = ( Application.objects.filter( Q( partner__coordinator__editor__user__userprofile__pending_app_reminders=True ) & Q(status=Application.PENDING) | Q( partner__coordinator__editor__user__userprofile__discussion_app_reminders=True ) & Q(status=Application.QUESTION) | Q( partner__coordinator__editor__user__userprofile__approved_app_reminders=True ) & Q(status=Application.APPROVED), partner__status__in=[Partner.AVAILABLE], editor__isnull=False, ) .exclude(editor__user__groups__name="restricted") .order_by("status", "partner", "date_created") ) # A deduplicated dict of coordinators from the pending app queryset, along # with a count of how many total pending apps they have coordinators = Counter( all_apps.values_list( "partner__coordinator__editor", "partner__coordinator__email", "partner__coordinator__editor__user__userprofile__lang", ) ) for coordinator, count in list(coordinators.items()): try: # We create a dictionary with the three status codes # we'd want to send emails for, and their corresponding # counts. app_status_and_count = { Application.PENDING: all_apps.filter( status=Application.PENDING, partner__coordinator__editor=coordinator[0], ).count(), Application.QUESTION: all_apps.filter( status=Application.QUESTION, partner__coordinator__editor=coordinator[0], ).count(), Application.APPROVED: all_apps.filter( status=Application.APPROVED, partner__coordinator__editor=coordinator[0], ).count(), } editor = Editor.objects.get(id=coordinator[0]) except Editor.DoesNotExist: logger.info( "Editor {} does not exist; skipping.".format(coordinator[0]) ) break # Only bother with the signal if we have a coordinator email. if coordinator[1]: Reminder.coordinator_reminder.send( sender=self.__class__, app_status_and_count=app_status_and_count, coordinator_wp_username=editor.wp_username, coordinator_email=coordinator[1], coordinator_lang=coordinator[2], )
43.593023
98
0.584956
357
3,749
5.834734
0.389356
0.077772
0.092175
0.053769
0.238118
0.176188
0.049928
0
0
0
0
0.003288
0.351027
3,749
85
99
44.105882
0.852857
0.175247
0
0.117647
0
0
0.057811
0.035076
0
0
0
0
0
1
0.014706
false
0
0.117647
0
0.147059
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a509772d4b71309e020c912aabb38728c706128
15,176
py
Python
python/3D-rrt/pvtrace/LightSources.py
rapattack88/mcclanahoochie
6df72553ba954b52e949a6847a213b22f9e90157
[ "Apache-2.0" ]
1
2020-12-27T21:37:35.000Z
2020-12-27T21:37:35.000Z
python/3D-rrt/pvtrace/LightSources.py
rapattack88/mcclanahoochie
6df72553ba954b52e949a6847a213b22f9e90157
[ "Apache-2.0" ]
null
null
null
python/3D-rrt/pvtrace/LightSources.py
rapattack88/mcclanahoochie
6df72553ba954b52e949a6847a213b22f9e90157
[ "Apache-2.0" ]
null
null
null
# pvtrace is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pvtrace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy as np from external.transformations import translation_matrix, rotation_matrix import external.transformations as tf from Trace import Photon from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm from Materials import Spectrum def random_spherecial_vector(): # This method of calculating isotropic vectors is taken from GNU Scientific Library LOOP = True while LOOP: x = -1. + 2. * np.random.uniform() y = -1. + 2. * np.random.uniform() s = x**2 + y**2 if s <= 1.0: LOOP = False z = -1. + 2. * s a = 2 * np.sqrt(1 - s) x = a * x y = a * y return np.array([x,y,z]) class SimpleSource(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False): super(SimpleSource, self).__init__() self.position = position self.direction = direction self.wavelength = wavelength self.use_random_polarisation = use_random_polarisation self.throw = 0 self.source_id = "SimpleSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength # If use_polarisation is set generate a random polarisation vector of the photon if self.use_random_polarisation: # Randomise rotation angle around xy-plane, the transform from +z to the direction of the photon vec = random_spherecial_vector() vec[2] = 0. vec = norm(vec) R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1]) photon.polarisation = transform_direction(vec, R) else: photon.polarisation = None photon.id = self.throw self.throw = self.throw + 1 return photon class Laser(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None): super(Laser, self).__init__() self.position = np.array(position) self.direction = np.array(direction) self.wavelength = wavelength assert polarisation != None, "Polarisation of the Laser is not set." self.polarisation = np.array(polarisation) self.throw = 0 self.source_id = "LaserSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength photon.polarisation = self.polarisation photon.id = self.throw self.throw = self.throw + 1 return photon class PlanarSource(object): """A box that emits photons from the top surface (normal), sampled from the spectrum.""" def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05): super(PlanarSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.plane = FinitePlane(length=length, width=width) self.length = length self.width = width # direction is the direction that photons are fired out of the plane in the GLOBAL FRAME. # i.e. this is passed directly to the photon to set is's direction self.direction = direction self.throw = 0 self.source_id = "PlanarSource_" + str(id(self)) def translate(self, translation): self.plane.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.plane.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Create a point which is on the surface of the finite plane in it's local frame x = np.random.uniform(0., self.length) y = np.random.uniform(0., self.width) local_point = (x, y, 0.) # Transform the direciton photon.position = transform_point(local_point, self.plane.transform) photon.direction = self.direction photon.active = True if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSource(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. """ def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.throw = 0 self.source_id = "LensSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position x = np.random.uniform(self.planeorigin[0],self.planeextent[0]) y = np.random.uniform(self.planeorigin[1],self.planeextent[1]) z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) photon.position = np.array((x,y,z)) # Direction focuspoint = np.array((0.,0.,0.)) focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize) focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize) focuspoint[2] = photon.position[2] direction = focuspoint - photon.position modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5 photon.direction = direction/modulus # Wavelength if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSourceAngle(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. For this lense an additional z-boost is added (Angle of incidence in z-direction). """ def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), angle = 0, focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSourceAngle, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.angle = angle self.throw = 0 self.source_id = "LensSourceAngle_" + str(id(self)) def photon(self): photon = Photon() photon.id = self.throw self.throw = self.throw + 1 # Position x = np.random.uniform(self.planeorigin[0],self.planeextent[0]) y = np.random.uniform(self.planeorigin[1],self.planeextent[1]) boost = y*np.tan(self.angle) z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) - boost photon.position = np.array((x,y,z)) # Direction focuspoint = np.array((0.,0.,0.)) focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize) focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize) focuspoint[2] = photon.position[2] + boost direction = focuspoint - photon.position modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5 photon.direction = direction/modulus # Wavelength if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class CylindricalSource(object): """ A source for photons emitted in a random direction and position inside a cylinder(radius, length) """ def __init__(self, spectrum = None, wavelength = 555, radius = 1, length = 10): super(CylindricalSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.shape = Cylinder(radius = radius, length = length) self.radius = radius self.length = length self.throw = 0 self.source_id = "CylindricalSource_" + str(id(self)) def translate(self, translation): self.shape.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.shape.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position of emission phi = np.random.uniform(0., 2*np.pi) r = np.random.uniform(0.,self.radius) x = r*np.cos(phi) y = r*np.sin(phi) z = np.random.uniform(0.,self.length) local_center = (x,y,z) photon.position = transform_point(local_center, self.shape.transform) # Direction of emission (no need to transform if meant to be isotropic) phi = np.random.uniform(0.,2*np.pi) theta = np.random.uniform(0.,np.pi) x = np.cos(phi)*np.sin(theta) y = np.sin(phi)*np.sin(theta) z = np.cos(theta) local_direction = (x,y,z) photon.direction = local_direction # Set wavelength of photon if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength # Further initialisation photon.active = True return photon class PointSource(object): """ A point source that emits randomly in solid angle specified by phimin, ..., thetamax """ def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi): super(PointSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.center = center self.phimin = phimin self.phimax = phimax self.thetamin = thetamin self.thetamax = thetamax self.throw = 0 self.source_id = "PointSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 phi = np.random.uniform(self.phimin, self.phimax) theta = np.random.uniform(self.thetamin, self.thetamax) x = np.cos(phi)*np.sin(theta) y = np.sin(phi)*np.sin(theta) z = np.cos(theta) direction = (x,y,z) transform = tf.translation_matrix((0,0,0)) point = transform_point(self.center, transform) photon.direction = direction photon.position = point if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength photon.active = True return photon class RadialSource(object): """ A point source that emits at discrete angles theta(i) and phi(i) """ def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi, spacing=20): super(RadialSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.center = center self.phimin = phimin self.phimax = phimax self.thetamin = thetamin self.thetamax = thetamax self.spacing = spacing self.throw = 0 self.source_id = "RadialSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 intphi = np.random.randint(1, self.spacing+1) inttheta = np.random.randint(1, self.spacing+1) phi = intphi*(self.phimax-self.phimin)/self.spacing if self.thetamin == self.thetamax: theta = self.thetamin else: theta = inttheta*(self.thetamax-self.thetamin)/self.spacing x = np.cos(phi)*np.sin(theta) y = np.sin(phi)*np.sin(theta) z = np.cos(theta) direction = (x,y,z) transform = tf.translation_matrix((0,0,0)) point = transform_point(self.center, transform) photon.direction = direction photon.position = point if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength photon.active = True return photon
38.035088
176
0.61136
1,832
15,176
4.985262
0.13155
0.031534
0.044345
0.031534
0.665608
0.633089
0.597613
0.576152
0.562137
0.546151
0
0.017419
0.285055
15,176
398
177
38.130653
0.824332
0.152412
0
0.630435
0
0
0.011389
0
0
0
0
0
0.003623
1
0.076087
false
0
0.021739
0
0.15942
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a50da9ca339ad2c3e097b9d78b1dde9a457d80b
1,037
py
Python
plot_user_activity.py
KanayBhandari/discord_bot_project
4baa62c963c532b08060689bed872e36e72460f9
[ "MIT" ]
null
null
null
plot_user_activity.py
KanayBhandari/discord_bot_project
4baa62c963c532b08060689bed872e36e72460f9
[ "MIT" ]
null
null
null
plot_user_activity.py
KanayBhandari/discord_bot_project
4baa62c963c532b08060689bed872e36e72460f9
[ "MIT" ]
null
null
null
import discord import random from datetime import datetime import pandas as pd import matplotlib.pyplot as plt import csv async def plot_user_activity(client, ctx): plt.style.use('fivethirtyeight') df = pd.read_csv('innovators.csv', encoding= 'unicode_escape') author = df['author'].to_list() message_counter = {} for i in author: if i in message_counter: message_counter[i] += 1 else: message_counter[i] = 1 # for not mentioning the bot in the line graph. message_counter.pop('ninza_bot_test') authors_in_discord = list(message_counter.keys()) no_of_messages = list(message_counter.values()) plt.plot(authors_in_discord, no_of_messages, marker = 'o', markersize=10) plt.title('msg sent by author in the server.') plt.xlabel('Author') plt.ylabel('Message_count') plt.savefig('output2.png') plt.tight_layout() plt.close() await ctx.send(file = discord.File('output2.png'))
26.589744
78
0.649952
138
1,037
4.710145
0.543478
0.150769
0.083077
0.049231
0
0
0
0
0
0
0
0.007653
0.243973
1,037
38
79
27.289474
0.821429
0.043394
0
0
0
0
0.14511
0
0
0
0
0
0
1
0
false
0
0.222222
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a50f54c898793f1acb00252a2b2f5ed4e326667
790
py
Python
setup.py
skojaku/fastnode2vec
bb65f68469f00f489fa6744d35b8756200b4e285
[ "MIT" ]
61
2020-04-21T18:58:47.000Z
2022-03-26T22:41:45.000Z
setup.py
skojaku/fastnode2vec
bb65f68469f00f489fa6744d35b8756200b4e285
[ "MIT" ]
17
2020-04-21T22:37:17.000Z
2022-03-31T22:36:03.000Z
setup.py
skojaku/fastnode2vec
bb65f68469f00f489fa6744d35b8756200b4e285
[ "MIT" ]
6
2020-07-30T01:41:59.000Z
2022-01-19T10:13:01.000Z
#!/usr/bin/env python3 import os from setuptools import setup def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name="fastnode2vec", version="0.0.5", author="Louis Abraham", license="MIT", author_email="[email protected]", description="Fast implementation of node2vec", long_description=read("README.md"), long_description_content_type="text/markdown", url="https://github.com/louisabraham/fastnode2vec", packages=["fastnode2vec"], install_requires=["numpy", "numba", "gensim", "click", "tqdm"], python_requires=">=3.6", entry_points={"console_scripts": ["fastnode2vec = fastnode2vec.cli:node2vec"]}, classifiers=["Topic :: Scientific/Engineering :: Artificial Intelligence"], )
29.259259
83
0.694937
91
790
5.89011
0.769231
0.022388
0
0
0
0
0
0
0
0
0
0.019118
0.139241
790
26
84
30.384615
0.769118
0.026582
0
0
0
0
0.39974
0.089844
0
0
0
0
0
1
0.05
false
0
0.1
0.05
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a522d6b63aaba15a267dd005faa4b6a9a20a8c0
946
py
Python
Chapter07/library/check_user_py3.py
djouani/Learning-Ansible-2.X-Third-Edition
34d6745c2bde8367ad2db7c9343bc8477b0643d7
[ "MIT" ]
22
2019-04-22T02:13:39.000Z
2021-10-01T22:03:51.000Z
Chapter07/library/check_user_py3.py
djouani/Learning-Ansible-2.X-Third-Edition
34d6745c2bde8367ad2db7c9343bc8477b0643d7
[ "MIT" ]
1
2019-12-12T20:22:43.000Z
2020-08-30T17:13:00.000Z
Chapter07/library/check_user_py3.py
djouani/Learning-Ansible-2.X-Third-Edition
34d6745c2bde8367ad2db7c9343bc8477b0643d7
[ "MIT" ]
25
2019-04-09T04:29:50.000Z
2021-12-22T22:17:14.000Z
#!/usr/bin/env python import pwd from ansible.module_utils.basic import AnsibleModule class User: def __init__(self, user): self.user = user # Check if user exists def check_if_user_exists(self): try: user = pwd.getpwnam(self.user) success = True ret_msg = 'User %s exists' % self.user except KeyError: success = False ret_msg = 'User %s does not exists' % self.user return success, ret_msg def main(): # Parsing argument file module = AnsibleModule( argument_spec = dict( user = dict(required=True) ) ) user = module.params.get('user') chkusr = User(user) success, ret_msg = chkusr.check_if_user_exists() # Error handling and JSON return if success: module.exit_json(msg=ret_msg) else: module.fail_json(msg=ret_msg) if __name__ == "__main__": main()
23.073171
59
0.599366
118
946
4.567797
0.432203
0.06679
0.061224
0.09462
0
0
0
0
0
0
0
0
0.308668
946
40
60
23.65
0.824159
0.099366
0
0
0
0
0.057783
0
0
0
0
0
0
1
0.103448
false
0
0.068966
0
0.241379
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5244b8a475e9cca6ead4f4463ced1af6491956
17,945
py
Python
survey/api/matrix.py
djaodjin/djaodjin-survey
a6eb8a577fecd219850478c245d9ebe990438a64
[ "BSD-2-Clause" ]
15
2015-03-12T18:14:50.000Z
2022-03-26T10:16:55.000Z
survey/api/matrix.py
djaodjin/djaodjin-survey
a6eb8a577fecd219850478c245d9ebe990438a64
[ "BSD-2-Clause" ]
19
2015-03-31T20:48:08.000Z
2022-03-30T17:31:49.000Z
survey/api/matrix.py
djaodjin/djaodjin-survey
a6eb8a577fecd219850478c245d9ebe990438a64
[ "BSD-2-Clause" ]
4
2015-12-16T20:53:34.000Z
2017-12-20T19:50:42.000Z
# Copyright (c) 2020, DjaoDjin inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import logging, re from collections import OrderedDict from django.db.models import F from django.http import Http404 from django.shortcuts import get_object_or_404 from extra_views.contrib.mixins import SearchableListMixin from rest_framework import generics from rest_framework.pagination import PageNumberPagination from rest_framework import response as http from ..compat import reverse from ..mixins import MatrixMixin from ..models import Answer, Matrix, EditableFilter from ..utils import (get_account_model, get_account_serializer, get_question_serializer) from .serializers import EditableFilterSerializer, MatrixSerializer LOGGER = logging.getLogger(__name__) class MatrixCreateAPIView(generics.ListCreateAPIView): """ Filtered list of ``Question``. **Examples**: .. code-block:: http GET /api/matrix/ Response: { "slug": "all", "title": "All accounts against all questions", "metric": { "slug": "all-questions", "title": "All questions", "predicates": [] }, "cohorts": [{ "slug": "all-accounts", "title": "All accounts", "predicates": [] }] } .. code-block:: http POST /api/matrix/ { "slug": "all", "title": "All accounts against all questions", "metric": { "slug": "all-questions", "title": "All questions", "predicates": [] }, "cohorts": [{ "slug": "all-accounts", "title": "All accounts", "predicates": [] }] } Response: 201 CREATED { "slug": "all", "title": "All accounts against all questions", "metric": { "slug": "all-questions", "title": "All questions", "predicates": [] }, "cohorts": [{ "slug": "all-accounts", "title": "All accounts", "predicates": [] }] } """ serializer_class = MatrixSerializer def get_queryset(self): return Matrix.objects.all() class MatrixDetailAPIView(MatrixMixin, generics.RetrieveUpdateDestroyAPIView): """ A table of scores for cohorts aganist a metric. **Examples**: .. code-block:: http GET /api/matrix/languages Response: [{ "slug": "languages", "title": "All cohorts for all questions" "scores":{ "portfolio-a": "0.1", "portfolio-b": "0.5", } }] """ serializer_class = MatrixSerializer lookup_field = 'slug' lookup_url_kwarg = 'path' question_model = get_question_serializer().Meta.model def aggregate_scores(self, metric, cohorts, cut=None, accounts=None): #pylint:disable=unused-argument,too-many-locals if accounts is None: accounts = get_account_model().objects.all() scores = {} if metric: assert 'metric' in metric.tags, \ "filter '%s' is not tagged as a metric" % str(metric) includes, excludes = metric.as_kwargs() questions = self.question_model.objects.filter( **includes).exclude(**excludes) nb_questions = len(questions) if nb_questions > 0: for cohort in cohorts: if isinstance(cohort, EditableFilter): includes, excludes = cohort.as_kwargs() qs_accounts = accounts.filter( **includes).exclude(**excludes) else: # If `matrix.cohorts is None`, the `cohorts` argument # will be a list of single account objects. qs_accounts = [cohort] nb_accounts = len(qs_accounts) if nb_accounts > 0: nb_correct_answers = Answer.objects.filter( question__in=questions, sample__account__in=qs_accounts).filter( measured=F('question__correct_answer')).count() score = nb_correct_answers * 100 / ( nb_questions * nb_accounts) LOGGER.debug("score for '%s' = (%d * 100) "\ "/ (%d * %d) = %f", str(cohort), nb_correct_answers, nb_questions, nb_accounts, score) assert score <= 100 scores.update({str(cohort): score}) return {"scores": scores} @property def matrix(self): if not hasattr(self, '_matrix'): self._matrix = Matrix.objects.filter( slug=self.kwargs.get(self.matrix_url_kwarg)).first() return self._matrix def get_accounts(self): #pylint:disable=unused-argument,no-self-use return get_account_model().objects.all() def get_likely_metric(self, cohort_slug): """ Returns a URL to a ``Matrix`` derived from *cohort*. Many times people will use the same name to either mean a cohort or a metric and expect the system will magically switch between both meaning. This is an attempt at magic. """ likely_metric = None look = re.match(r"(\S+)(-\d+)", cohort_slug) if look: try: likely_metric = self.request.build_absolute_uri( reverse('matrix_chart', args=( EditableFilter.objects.get(slug=look.group(1)).slug,))) except EditableFilter.DoesNotExist: pass return likely_metric def get(self, request, *args, **kwargs): #pylint:disable=unused-argument,too-many-locals matrix = self.matrix if matrix: metric = self.matrix.metric else: parts = self.kwargs.get(self.matrix_url_kwarg).split('/') metric = get_object_or_404(EditableFilter, slug=parts[-1]) matrix = Matrix.objects.filter(slug=parts[0]).first() if not matrix: raise Http404() cohort_serializer = EditableFilterSerializer cohorts = matrix.cohorts.exclude(tags__contains='aggregate') public_cohorts = matrix.cohorts.filter(tags__contains='aggregate') cut = matrix.cut if not cohorts: # We don't have any cohorts, let's show individual accounts instead. if cut: includes, excludes = cut.as_kwargs() accounts = self.get_accounts().filter( **includes).exclude(**excludes) else: accounts = self.get_accounts() cohort_serializer = get_account_serializer() # Implementation Note: switch cohorts from an queryset # of `EditableFilter` to a queryset of `Account` ... cohorts = accounts result = [] scores = {} val = { 'slug': metric.slug, 'title': metric.title, 'metric': EditableFilterSerializer().to_representation(metric), 'cut': EditableFilterSerializer().to_representation(cut), 'cohorts': cohort_serializer(many=True).to_representation(cohorts)} # In some case, a metric and cohort have a connection # and could have the same name. for cohort in val['cohorts']: likely_metric = self.get_likely_metric(cohort['slug']) if likely_metric: cohort['likely_metric'] = likely_metric scores.update(val) scores.update({"values": self.aggregate_scores( metric, cohorts, cut, accounts=self.get_accounts())}) result += [scores] if public_cohorts: public_scores = {} public_scores.update(val) public_scores.update( {"cohorts": EditableFilterSerializer( public_cohorts, many=True).data, "values": self.aggregate_scores(metric, public_cohorts)}) result += [public_scores] return http.Response(result) class EditableFilterQuerysetMixin(object): @staticmethod def get_queryset(): return EditableFilter.objects.all() class EditableFilterListAPIView(SearchableListMixin, EditableFilterQuerysetMixin, generics.ListCreateAPIView): """ List fitlers **Tags**: survey **Examples** .. code-block:: http GET /api/xia/matrix/filters/ HTTP/1.1 responds .. code-block:: json { "count": 2, previous: null, next: null, results: [ { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" }, { "slug": "none", "title": "None", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } ] } """ search_fields = ['tags'] serializer_class = EditableFilterSerializer def post(self, request, *args, **kwargs): """ Create a fitler **Tags**: survey **Examples** .. code-block:: http POST /api/xia/matrix/filters/ HTTP/1.1 responds .. code-block:: json { "count": 2, previous: null, next: null, results: [ { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" }, { "slug": "none", "title": "None", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } ] } """ #pylint:disable=useless-super-delegation return super(EditableFilterListAPIView, self).post( request, *args, **kwargs) class EditableFilterDetailAPIView(generics.RetrieveUpdateDestroyAPIView): """ Retrieve a fitler **Tags**: survey **Examples** .. code-block:: http GET /api/xia/matrix/filters/all/ HTTP/1.1 responds .. code-block:: json { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } """ serializer_class = EditableFilterSerializer lookup_field = 'slug' lookup_url_kwarg = 'editable_filter' def get_queryset(self): return EditableFilter.objects.all() def put(self, request, *args, **kwargs): """ Updates a fitler **Tags**: survey **Examples** .. code-block:: http PUT /api/xia/matrix/filters/all/ HTTP/1.1 .. code-block:: json { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } responds .. code-block:: json { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } """ #pylint:disable=useless-super-delegation return super(EditableFilterDetailAPIView, self).put( request, *args, **kwargs) def delete(self, request, *args, **kwargs): """ Deletes a fitler **Tags**: survey **Examples** .. code-block:: http DELETE /api/xia/matrix/filters/all/ HTTP/1.1 """ #pylint:disable=useless-super-delegation return super(EditableFilterDetailAPIView, self).delete( request, *args, **kwargs) class EditableFilterPagination(PageNumberPagination): def paginate_queryset(self, queryset, request, view=None): self.editable_filter = view.editable_filter return super(EditableFilterPagination, self).paginate_queryset( queryset, request, view=view) def get_paginated_response(self, data): return http.Response(OrderedDict([ ('editable_filter', EditableFilterSerializer().to_representation( self.editable_filter)), ('count', self.page.paginator.count), ('next', self.get_next_link()), ('previous', self.get_previous_link()), ('results', data) ])) class EditableFilterObjectsAPIView(generics.ListAPIView): """ List filter objects **Tags**: survey **Examples** .. code-block:: http GET /api/xia/matrix/filters/ HTTP/1.1 responds .. code-block:: json { "created_at": "2020-01-01T00:00:00Z", "measured": 12 } """ pagination_class = EditableFilterPagination serializer_class = None # override in subclasses lookup_field = 'slug' lookup_url_kwarg = 'editable_filter' def get_queryset(self): return self.get_serializer_class().Meta.model.objects.all() def get(self, request, *args, **kwargs): #pylint: disable=unused-argument self.editable_filter = generics.get_object_or_404( EditableFilter.objects.all(), slug=self.kwargs[self.lookup_url_kwarg]) return super(EditableFilterObjectsAPIView, self).get( request, *args, **kwargs) class AccountListAPIView(EditableFilterObjectsAPIView): """ Filtered list of ``EditableFilter``. **Examples**: .. code-block:: http GET /api/questions/languages Response: { "slug": "languages", "title": "All questions related to languages" "predicates":[{ "operator": "contains", "operand": "language", "field": "text", "selector":"keepmatching" }] } """ serializer_class = get_account_serializer() class QuestionListAPIView(EditableFilterObjectsAPIView): """ Filtered list of ``Question``. **Examples**: .. code-block:: http GET /api/questions/languages Response: { "slug": "languages", "title": "All questions related to languages" "predicates":[{ "operator": "contains", "operand": "language", "field": "text", "selector":"keepmatching" }] } """ serializer_class = get_question_serializer()
29.958264
80
0.51017
1,558
17,945
5.770218
0.220154
0.017019
0.015907
0.023359
0.353504
0.31535
0.298776
0.281646
0.260957
0.248943
0
0.007095
0.379549
17,945
598
81
30.008361
0.800341
0.445361
0
0.156425
0
0
0.038207
0.002839
0
0
0
0
0.011173
1
0.083799
false
0.005587
0.078212
0.03352
0.385475
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5248567c33c615e0f8be2779b082966267e38c
2,740
py
Python
remove_labels.py
iFishy/DomainApp
970ee96450859b1c40a86a9d654beb99c56aa00f
[ "MIT" ]
null
null
null
remove_labels.py
iFishy/DomainApp
970ee96450859b1c40a86a9d654beb99c56aa00f
[ "MIT" ]
null
null
null
remove_labels.py
iFishy/DomainApp
970ee96450859b1c40a86a9d654beb99c56aa00f
[ "MIT" ]
null
null
null
from __future__ import print_function import httplib2 import os import sys import pickle from apiclient import discovery from apiclient import errors from oauth2client import client from oauth2client import tools from oauth2client.file import Storage try: import argparse flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args() except ImportError: flags = None # If modifying these scopes, delete your previously saved credentials # at ~/.credentials/gmail-python-quickstart.json SCOPES = 'https://www.googleapis.com/auth/gmail.labels' CLIENT_SECRET_FILE = 'client_secret.json' APPLICATION_NAME = 'Inbox Organize' def get_credentials(): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. """ home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.credentials') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json') store = Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES) flow.user_agent = APPLICATION_NAME if flags: credentials = tools.run_flow(flow, store, flags) else: # Needed only for compatibility with Python 2.6 credentials = tools.run(flow, store) print('Storing credentials to ' + credential_path) return credentials def GetLabels(service, user_id): try: response = service.users().labels().list(userId=user_id).execute() labels = response['labels'] """ for label in labels: print ('Label id: %s - Label name: %s' % (label['id'], label['name'])) """ return labels except errors.HttpError as error: print ('An error occurred: %s' % error) def DeleteLabel(service, user_id, label_id): try: service.users().labels().delete(userId=user_id, id=label_id).execute() print ('Label with id: %s deleted successfully.' % label_id) except errors.HttpError as error: print ('An error occurred: %s' % error) def main(): credentials = get_credentials() http = credentials.authorize(httplib2.Http()) service = discovery.build('gmail', 'v1', http=http) userId = 'me' labels = GetLabels(service, userId) for label in labels: if (label['type'] == 'user'): print('Deleting label:', label['name']) DeleteLabel(service, userId, label['id']) if __name__ == '__main__': main()
30.444444
79
0.69635
341
2,740
5.469208
0.366569
0.02252
0.020375
0.02681
0.061126
0.061126
0.061126
0.061126
0.061126
0.061126
0
0.00408
0.194891
2,740
89
80
30.786517
0.841342
0.140876
0
0.114754
0
0
0.123866
0.012704
0
0
0
0
0
1
0.065574
false
0
0.196721
0
0.295082
0.098361
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a525a8a02f61d5499bb1c8bffd7e68682942e12
9,990
py
Python
ics2entropiawiki.py
entropia/ics2entropiawiki
d77fa8073c2b18eade1c2b85feaccab8b6598c6b
[ "Apache-2.0" ]
2
2020-01-02T04:52:03.000Z
2020-03-02T04:00:08.000Z
ics2entropiawiki.py
entropia/ics2entropiawiki
d77fa8073c2b18eade1c2b85feaccab8b6598c6b
[ "Apache-2.0" ]
8
2018-11-06T10:05:43.000Z
2021-10-09T20:26:16.000Z
ics2entropiawiki.py
entropia/ics2entropiawiki
d77fa8073c2b18eade1c2b85feaccab8b6598c6b
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ics2entropiawiki Read an ics file with the entropia events and insert them in to the entropia homepage wiki. Example: $ ics2entropiawiki.py --config /etc/ics2entropiawiki/config.ini Inserts events not in the past to the "Termine" Wiki page and appends past events to the "Vergangene_Termine" Site """ import locale import configparser import re import requests from argparse import ArgumentParser from datetime import timedelta, datetime from ics import Calendar from mwclient import Site from dateutil.tz import tzlocal BOTWARNING = """ <!-- This text is automatically generated by the ics2entropiawiki bot, everything you write and everything you edit WILL BE OVERWRITTEN Dieser Text ist vom ics2entropiawiki bot automatisch generiert. Alles was hier manuell editiert, hinzugefügt wird WIRD ÜBERSCHRIEBEN --> """ TABLE_HEADER = """ {| class="termine" border="1" cellspacing="0" cellpadding="5" width="100%" style="border-collapse:collapse;" ! style="width:250px;" | Datum !! style="width:50px;" | Zeit !! Ort !! Beschreibung\ """ ARCHIVE_TABLE_HEADER = """ {| class="termine" border="1" cellspacing="0" cellpadding="5" style="border-collapse:collapse;" width="100%" |width=15%|'''Datum''' |width=6%|'''Zeit''' |width=15%|'''Ort''' |width=69%|'''Beschreibung''' """ TABLE_FOOTER = ( "|}", "\n", "Weitere Links: [[Vorlage:Termine|Termine]] ", "([https://entropia.de/index.php?title=Vorlage:Termine&action=edit Bearbeiten]),", " [[Vorlage:Vergangene_Termine|Vergangene Termine]], [[Anfahrt]]" ) LINE_SEPARATOR = "|-\n" try: locale.setlocale(locale.LC_ALL, 'de_DE.utf8') except locale.Error: pass class EntropiaEvent: """ Parses an ics Event and converts it to an entropia-wiki suitable form """ def __init__(self, event): """ :param event: The event to be evaluated :type event: ics.event.Event """ self.event = event self.begintime = event.begin.datetime.astimezone() self.endtime = event._end_time.datetime.astimezone() @property def location(self): """ Retrieve the location of an event :return: location :rtype: str """ locations = { "entropia": "[[Anfahrt|Entropia]]", } location = " " if self.event.location: location = self.event.location if location.lower() in locations.keys(): location = locations[location.lower()] return location @property def begin_date(self): """ :return: Entropia-Wiki formatted begin time :rtype: str """ return self.begintime.strftime("%a., %d.%m.%Y") @property def end_date(self): """ :return: Entropia-Wiki formatted end time :rtype: str """ end_date = "" if self.endtime - self.begintime > timedelta(days=1): end_date = " - " + self.endtime.strftime("%a., %d.%m.%Y") return end_date @property def days_to_event(self): """ :return: Days to the start of the event :rtype: datetime.timedelta """ return self.endtime - datetime.now(tz=tzlocal()) @property def is_past_event(self): """ :return: Check if the event lies in the past :rtype: bool """ return self.days_to_event < timedelta(days=0) @property def start_time(self): """ :return: The starting time of the event :rtype: str """ start_time = " " if not self.event.all_day: start_time = self.begintime.strftime("%H:%M") return start_time @property def description(self): """ :return: The event's description :rtype: str """ links = None wiki = None event = self.event if event.description: links = re.findall("^[Ll]ink:(.*)$", event.description) wiki = re.findall("^[Ww]iki:(.*)$", event.description) if links and event.name: description = "["+links[0]+" "+event.name+"]" elif wiki: description = wiki[0] elif not event.name: description = "N.A." else: description = event.name return description def __str__(self): """ :return: A wiki line describing the event :rtype: str """ return ("| " + self.begin_date + self.end_date + " || " + self.start_time + " || " + self.location + " || " + self.description ) def append_past_events(past_events, wiki_user, wiki_pw, wiki_archive): """ Append the "new" past events to the wiki archive page :param past_events: the past events that were not added to the events page :type past_events: list :param wiki_user: bot user for the wiki :type wiki_user: str :param wiki_pw: password for the wiki user :type wiki_pw: str :param wiki_archive: archive page :type wiki_archive: str :return: None :rtype: None """ site = Site('entropia.de', path='/') site.login(wiki_user, wiki_pw) page = site.pages[wiki_archive] text = page.text().split('\n') last_table_position = 0 for event in past_events: year_header = "== {} ==".format(event.endtime.strftime('%Y')) for index, txtline in enumerate(text): if txtline == '|}': last_table_position = index if str(event) in text: continue if year_header in text: append_list = ( '\n' + LINE_SEPARATOR + str(event) ) text = text[:last_table_position]+[append_list, ]+text[last_table_position:] else: append_list = ( 3 * '\n' + year_header + ARCHIVE_TABLE_HEADER + '\n' + LINE_SEPARATOR + '\n' + str(event) + '\n|}' ) text = text[:last_table_position+1]+[append_list, ]+text[last_table_position+1:] page.save("\n".join(text)) def get_args(): """ Retrieve arguments from the command line, the config file respectively :return: Parsed arguments from command line, config file :rtype: list """ parser = ArgumentParser() parser.add_argument( "-c", "--config", default="/etc/ics2entropiawiki/config.ini", dest="configfile", help="Configuration file path", metavar="CONFIG" ) parser.add_argument( "-u", "--url", dest="ics_url", help="The URL under which the ICS-file can be retrieved", metavar="URL", ) parser.add_argument( "-f", "--file", dest="local_file", help="Local ics file", metavar="FILE" ) parser.add_argument( "--wiki-user", dest="wiki_user", help="Wiki user", metavar="WIKIUSER" ) parser.add_argument( "--wiki-password", dest="wiki_pw", help="Wiki user's password", metavar="WIKIPW" ) parser.add_argument( "--wiki-page", dest="wiki_page", help='Wiki page', metavar='WIKIPAGE' ) parser.add_argument( "--wiki-archive", dest="wiki_archive", help='Wiki archive', metavar='WIKIARCHIVE' ) parser.add_argument( "-d", "--debug", dest="debug", action="store_true", default=False ) args = parser.parse_args() configfile = args.configfile ics_url = args.ics_url file = args.local_file wiki = { 'user': args.wiki_user, 'pass': args.wiki_pw, 'page': args.wiki_page, 'archive': args.wiki_archive, } debug = args.debug if configfile: config = configparser.ConfigParser() config.read(configfile) try: ics_url = config["default"]["url"] wiki = config["wiki"] except KeyError as error: print("Please have a look at the sample config provided with the package") raise error return ics_url, file, wiki, debug def deradicalise_ical(ics): """ :param ics: input file :type ics: str :return: file with remove radicale_headers """ deradicalised = "" for line in ics.splitlines(): if 'X-RADICALE-NAME:' not in line: deradicalised += "\n"+line return deradicalised def main(): """ :return: None :rtype: None """ ics_url, file, wiki, debug = get_args() event_strings = [] past_events = [] if file: calendar = Calendar(deradicalise_ical(open(file).read())) else: ics_result = requests.get(ics_url) ics_result.encoding = 'utf-8' calendar = Calendar(deradicalise_ical(ics_result.text)) for event in sorted(calendar.events, key=lambda ev: ev.begin): event = EntropiaEvent(event) if not event.is_past_event: event_strings.append( "\n" + LINE_SEPARATOR + str(event) ) else: past_events.append(event) append_past_events(past_events, wiki['user'], wiki['pass'], wiki['archive']) termine = BOTWARNING + "\n" + TABLE_HEADER + "\n" + "".join(event_strings) + "\n" + "".join(TABLE_FOOTER) if debug: print(termine) site = Site('entropia.de', path='/') site.login(wiki['user'], wiki['pass']) page = site.pages[wiki['page']] if termine: page.save(termine, "Terminbot was here") page.purge() if __name__ == '__main__': main()
26.289474
116
0.563063
1,111
9,990
4.940594
0.244824
0.018947
0.024777
0.015303
0.102022
0.073237
0.049189
0.049189
0.035343
0.035343
0
0.006103
0.311111
9,990
379
117
26.358839
0.791485
0.155255
0
0.157258
0
0.016129
0.20402
0.026845
0
0
0
0
0
1
0.052419
false
0.024194
0.03629
0
0.133065
0.008065
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a525fbb996d5472af74df006412af4f27d25fa6
527
py
Python
Arrays/LeftRotation.py
anand722000/algo_ds_101
b3e25ce2b2e47e53024f8d349232b04de2837ce3
[ "MIT" ]
175
2019-12-08T19:48:20.000Z
2022-03-24T07:38:08.000Z
Arrays/LeftRotation.py
anand722000/algo_ds_101
b3e25ce2b2e47e53024f8d349232b04de2837ce3
[ "MIT" ]
40
2019-12-07T08:11:41.000Z
2020-10-09T08:11:22.000Z
Arrays/LeftRotation.py
anand722000/algo_ds_101
b3e25ce2b2e47e53024f8d349232b04de2837ce3
[ "MIT" ]
95
2019-12-07T06:25:31.000Z
2022-03-03T20:12:45.000Z
#!/bin/python3 import math import os import random import re import sys # Complete the rotLeft function below. def rotLeft(a, d): alist = list(a) b = alist[d:]+alist[:d] return b if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') nd = input().split() n = int(nd[0]) d = int(nd[1]) a = list(map(int, input().rstrip().split())) result = rotLeft(a, d) fptr.write(' '.join(map(str, result))) fptr.write('\n') fptr.close()
16.46875
49
0.548387
74
527
3.783784
0.581081
0.057143
0.064286
0
0
0
0
0
0
0
0
0.007895
0.278937
527
31
50
17
0.728947
0.094877
0
0
0
0
0.051802
0
0
0
0
0
0
1
0.052632
false
0
0.263158
0
0.368421
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a541e67401b79ca7c42ad0362d81bb514bab960
947
py
Python
tests/unittests/test_zoo.py
SaizhuoWang/carefree-learn
3bf7b00286cdef556cc00fa2fcba5c390b5b9d20
[ "MIT" ]
null
null
null
tests/unittests/test_zoo.py
SaizhuoWang/carefree-learn
3bf7b00286cdef556cc00fa2fcba5c390b5b9d20
[ "MIT" ]
null
null
null
tests/unittests/test_zoo.py
SaizhuoWang/carefree-learn
3bf7b00286cdef556cc00fa2fcba5c390b5b9d20
[ "MIT" ]
1
2021-01-04T02:23:00.000Z
2021-01-04T02:23:00.000Z
import os import cflearn import platform import unittest from cfdata.tabular import TabularDataset num_jobs = 0 if platform.system() == "Linux" else 2 logging_folder = "__test_zoo__" class TestZoo(unittest.TestCase): @staticmethod def _test_zoo_core(model: str) -> None: x, y = TabularDataset.iris().xy zoo_folder = os.path.join(logging_folder, f"__{model}__") zoo = cflearn.Zoo(model) for key, config in zoo.benchmarks.items(): local_logging_folder = os.path.join(zoo_folder, key) config["logging_folder"] = local_logging_folder m = cflearn.make(model, **config).fit(x, y) cflearn.evaluate(x, y, pipelines=m) cflearn._rmtree(logging_folder) def test_fcnn_zoo(self) -> None: self._test_zoo_core("fcnn") def test_tree_dnn_zoo(self) -> None: self._test_zoo_core("tree_dnn") if __name__ == "__main__": unittest.main()
27.852941
65
0.663147
125
947
4.672
0.448
0.133562
0.056507
0.054795
0.089041
0.089041
0.089041
0
0
0
0
0.002717
0.222809
947
33
66
28.69697
0.790761
0
0
0
0
0
0.06547
0
0
0
0
0
0
1
0.12
false
0
0.2
0
0.36
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a54334c8ec0d2c98a16bb220c95973a631adeb1
3,810
py
Python
unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py
duliodenis/python_master_degree
3ab76838ce2fc1606f28e988a3273dd27122a621
[ "MIT" ]
19
2019-03-14T01:39:32.000Z
2022-02-03T00:36:43.000Z
unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py
duliodenis/python_master_degree
3ab76838ce2fc1606f28e988a3273dd27122a621
[ "MIT" ]
1
2020-04-10T01:01:16.000Z
2020-04-10T01:01:16.000Z
unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py
duliodenis/python_master_degree
3ab76838ce2fc1606f28e988a3273dd27122a621
[ "MIT" ]
5
2019-01-02T20:46:05.000Z
2020-07-08T22:47:48.000Z
# # Data Structures: Linked List Merge Sort: The Conquer Step # Python Techdegree # # Created by Dulio Denis on 3/24/19. # Copyright (c) 2019 ddApps. All rights reserved. # ------------------------------------------------ from linked_list import Node, LinkedList def merge_sort(linked_list): ''' Sorts a linked list in ascending order. - Recuresively divide the linked list into sublists containing a single node - Repeatedly merge the sublists to produce sorted swublists until one remains Returns a sorted linked list. Runs in O(kn log n) time. ''' if linked_list.size() == 1: return linked_list elif linked_list.is_empty(): return linked_list left_half, right_half = split(linked_list) left = merge_sort(left_half) right = merge_sort(right_half) return merge(left, right) def split(linked_list): ''' Divide the unsorted list at the midpoint into sublists. Takes O(k log n) quasilinear time. ''' if linked_list == None or linked_list.head == None: left_half = linked_list right_half = None return left_half, right_half else: # non-empty linked lists size = linked_list.size() midpoint = size // 2 mid_node = linked_list.node_at_index(midpoint-1) left_half = linked_list right_half = LinkedList() right_half = mid_node.next_node mid_node.next_node = None return left_half, right_half def merge(left, right): ''' Merges two linked lists, sorting by data in nodes. Returns a new, merged list. Runs in O(n) linear time. ''' # Create a new linked list that contains nodes from # merging left and right merged = LinkedList() # Add a fake head that is discarded later to simplify code merged.add(0) # Set current to the head of the linked list current = merged.head # Obtain head nodes for left and right linked lists left_head = left.head right_head = right.head # Iterate over left and right until we reach the tail node # of either while left_head or right_head: # If the head node of the left is None, we're past the tail # Add the node from right to merged linkned list if left_head is None: current.next_node = right_head # Call next on right to set loop condition to False right_head = right_head.next_node # If the head node of right is None, we're past the tail # Add the tail node from left to merged linked list elif right_head is None: current.next_node = left_head # Call next on left to set loop condition to False left_head = left_head.next_node else: # Not at either tail node # Obtain node data to perform comparison operations left_data = left_head.data right_data = right_head.data # If data on left is less than right, set current to left node if left_data < right_data: current.next_node = left_head # Move left head to next node left_head = left_head.next_node # If data on left is greater than right, set current to right node else: current.next_node = right_head # Move right head to next node right_head = right_head.next_node # Move current to next node current = current.next_node # Discard fake head and set first merged node as head head = merged.head.next_node merged.head = head return merged l = LinkedList() l.add(10) l.add(2) l.add(44) l.add(15) l.add(200) print(l) sorted_linked_list = merge_sort(l) print(sorted_linked_list)
32.288136
81
0.630971
546
3,810
4.261905
0.254579
0.09884
0.03223
0.021917
0.226042
0.155565
0.023206
0.023206
0.023206
0
0
0.008617
0.299475
3,810
117
82
32.564103
0.863245
0.425197
0
0.288136
0
0
0
0
0
0
0
0
0
1
0.050847
false
0
0.016949
0
0.169492
0.033898
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5438fd129b5b6996b6b2555c75bb6bb382b7d5
5,639
py
Python
nearpy/examples/example2.py
samyoo78/NearPy
1b534b864d320d875508e95cd2b76b6d8c07a90b
[ "MIT" ]
624
2015-01-02T21:45:28.000Z
2022-03-02T11:04:27.000Z
nearpy/examples/example2.py
samyoo78/NearPy
1b534b864d320d875508e95cd2b76b6d8c07a90b
[ "MIT" ]
65
2015-02-06T09:47:46.000Z
2021-09-26T01:45:26.000Z
nearpy/examples/example2.py
samyoo78/NearPy
1b534b864d320d875508e95cd2b76b6d8c07a90b
[ "MIT" ]
136
2015-01-07T04:45:41.000Z
2021-11-25T17:46:07.000Z
# -*- coding: utf-8 -*- # Copyright (c) 2013 Ole Krause-Sparmann # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import numpy import scipy import unittest import time from nearpy import Engine from nearpy.distances import CosineDistance from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper def example2(): # Dimension of feature space DIM = 100 # Number of data points (dont do too much because of exact search) POINTS = 20000 ########################################################## print('Performing indexing with HashPermutations...') t0 = time.time() # Create permutations meta-hash permutations = HashPermutations('permut') # Create binary hash as child hash rbp_perm = RandomBinaryProjections('rbp_perm', 14) rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100} # Add rbp as child hash of permutations hash permutations.add_child_hash(rbp_perm, rbp_conf) # Create engine engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance()) # First index some random vectors matrix = numpy.zeros((POINTS,DIM)) for i in range(POINTS): v = numpy.random.randn(DIM) matrix[i] = v engine_perm.store_vector(v) # Then update permuted index permutations.build_permuted_index() t1 = time.time() print('Indexing took %f seconds' % (t1-t0)) # Get random query vector query = numpy.random.randn(DIM) # Do random query on engine 3 print('\nNeighbour distances with HashPermutations:') print(' -> Candidate count is %d' % engine_perm.candidate_count(query)) results = engine_perm.neighbours(query) dists = [x[2] for x in results] print(dists) # Real neighbours print('\nReal neighbour distances:') query = query.reshape((DIM)) dists = CosineDistance().distance(matrix, query) dists = dists.reshape((-1,)) dists = sorted(dists) print(dists[:10]) ########################################################## print('\nPerforming indexing with HashPermutationMapper...') t0 = time.time() # Create permutations meta-hash permutations2 = HashPermutationMapper('permut2') # Create binary hash as child hash rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14) # Add rbp as child hash of permutations hash permutations2.add_child_hash(rbp_perm2) # Create engine engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance()) # First index some random vectors matrix = numpy.zeros((POINTS,DIM)) for i in range(POINTS): v = numpy.random.randn(DIM) matrix[i] = v engine_perm2.store_vector(v) t1 = time.time() print('Indexing took %f seconds' % (t1-t0)) # Get random query vector query = numpy.random.randn(DIM) # Do random query on engine 4 print('\nNeighbour distances with HashPermutationMapper:') print(' -> Candidate count is %d' % engine_perm2.candidate_count(query)) results = engine_perm2.neighbours(query) dists = [x[2] for x in results] print(dists) # Real neighbours print('\nReal neighbour distances:') query = query.reshape((DIM)) dists = CosineDistance().distance(matrix,query) dists = dists.reshape((-1,)) dists = sorted(dists) print(dists[:10]) ########################################################## print('\nPerforming indexing with multiple binary hashes...') t0 = time.time() hashes = [] for k in range(20): hashes.append(RandomBinaryProjections('rbp_%d' % k, 10)) # Create engine engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance()) # First index some random vectors matrix = numpy.zeros((POINTS,DIM)) for i in range(POINTS): v = numpy.random.randn(DIM) matrix[i] = v engine_rbps.store_vector(v) t1 = time.time() print('Indexing took %f seconds' % (t1-t0)) # Get random query vector query = numpy.random.randn(DIM) # Do random query on engine 4 print('\nNeighbour distances with multiple binary hashes:') print(' -> Candidate count is %d' % engine_rbps.candidate_count(query)) results = engine_rbps.neighbours(query) dists = [x[2] for x in results] print(dists) # Real neighbours print('\nReal neighbour distances:') query = query.reshape((DIM)) dists = CosineDistance().distance(matrix,query) dists = dists.reshape((-1,)) dists = sorted(dists) print(dists[:10]) ##########################################################
32.039773
90
0.662529
697
5,639
5.308465
0.291248
0.023784
0.025946
0.030811
0.481081
0.455135
0.432432
0.396757
0.377838
0.377838
0
0.014889
0.201986
5,639
175
91
32.222857
0.807333
0.311048
0
0.551724
0
0
0.164497
0.012739
0
0
0
0
0
1
0.011494
false
0
0.08046
0
0.091954
0.241379
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a54995b61e4b0596e764748ce12766155458309
2,551
py
Python
discordbot.py
8ka1alu/heroku-global-py
7968ff6c215d6d86149221c246b4aaa5cd04df59
[ "MIT" ]
null
null
null
discordbot.py
8ka1alu/heroku-global-py
7968ff6c215d6d86149221c246b4aaa5cd04df59
[ "MIT" ]
null
null
null
discordbot.py
8ka1alu/heroku-global-py
7968ff6c215d6d86149221c246b4aaa5cd04df59
[ "MIT" ]
null
null
null
from discord.ext import commands, tasks # Bot Commands Frameworkをインポート import traceback # エラー表示のためにインポート import os import discord import r TOKEN = os.environ['DISCORD_BOT_TOKEN'] prefix = os.environ['DISCORD_BOT_PREFIX'] #プレフィックス # 読み込むコグの名前を格納しておく。 INITIAL_EXTENSIONS = [ 'cogs.eval', 'cogs.glchat', 'cogs.gladd', 'cogs.gldel' ] # クラスの定義。ClientのサブクラスであるBotクラスを継承。 class MyBot(commands.Bot): # MyBotのコンストラクタ。 def __init__(self, command_prefix, help_command): # スーパークラスのコンストラクタに値を渡して実行。 super().__init__(command_prefix,help_command) # INITIAL_COGSに格納されている名前から、コグを読み込む。 # エラーが発生した場合は、エラー内容を表示。 for cog in INITIAL_EXTENSIONS: try: self.load_extension(cog) except Exception: traceback.print_exc() # Botの準備完了時に呼び出されるイベント async def on_ready(self): print(self.user.name) # ボットの名前 print(self.user.id) # ボットのID print(discord.__version__) # discord.pyのバージョン print('----------------') print('Hello World !!') await self.change_presence(status=discord.Status.idle,activity=discord.Game(name=f'Ping:{self.ws.latency * 1000:.0f}ms')) conn=r.connect() ky=conn.keys() global_ch="gloch" count=0 for i in ky: i=str(i) if i == global_ch: count+=1 if count>0: smsd=conn.smembers(global_ch) count=0 for q in smsd: q=str(q) if q=="0": count+=1 if count>0: p=conn.srem(global_ch,"0") if p==True: print("正常起動") else: print("異常発生") else: print(ky) else: p=conn.sadd(global_ch,"0") if p==True: print("正常起動") else: print("異常発生") class JapaneseHelpCommand(commands.DefaultHelpCommand): def __init__(self): super().__init__() self.commands_heading = "コマンド:" self.no_category = "その他" self.command_attrs["help"] = "コマンド一覧と簡単な説明を表示" def get_ending_note(self): return (f"各コマンドの説明: {prefix}help <コマンド名>\n" f"各カテゴリの説明: {prefix}help <カテゴリ名>\n") #MyBotのインスタンス化及び起動処理。 if __name__ == '__main__': bot = MyBot(command_prefix=prefix,help_command=JapaneseHelpCommand()) # command_prefixはコマンドの最初の文字として使うもの。 e.g. !ping bot.run(TOKEN) # Botのトークン
29.321839
130
0.563701
270
2,551
5.118519
0.462963
0.036179
0.036903
0.027496
0.075253
0.054993
0.054993
0.054993
0.054993
0.054993
0
0.008065
0.319483
2,551
86
131
29.662791
0.788018
0.126617
0
0.231884
0
0
0.118951
0.009498
0
0
0
0
0
1
0.043478
false
0
0.072464
0.014493
0.15942
0.15942
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a54b71c70f57dfff2df2e65acf1f13a323c5a9e
9,468
py
Python
examples/plot_spectral_unmixing.py
ealopez/pycroscopy
9f7c0543b67eaa0668296295fc5f492360c130a0
[ "MIT" ]
null
null
null
examples/plot_spectral_unmixing.py
ealopez/pycroscopy
9f7c0543b67eaa0668296295fc5f492360c130a0
[ "MIT" ]
null
null
null
examples/plot_spectral_unmixing.py
ealopez/pycroscopy
9f7c0543b67eaa0668296295fc5f492360c130a0
[ "MIT" ]
null
null
null
""" ================================================================= Spectral Unmixing ================================================================= Suhas Somnath, Rama K. Vasudevan, Stephen Jesse * Institute for Functional Imaging of Materials * Center for Nanophase Materials Sciences Oak Ridge National Laboratory, Oak Ridge TN 37831, USA In this notebook we load some spectral data, and perform basic data analysis, including: ======================================================================================== * KMeans Clustering * Non-negative Matrix Factorization * Principal Component Analysis Software Prerequisites: ======================= * Standard distribution of **Anaconda** (includes numpy, scipy, matplotlib and sci-kit learn) * **pycroscopy** : Though pycroscopy is mainly used here for plotting purposes only, it's true capabilities are realized through the ability to seamlessly perform these analyses on any imaging dataset (regardless of origin, size, complexity) and storing the results back into the same dataset among other things """ # Import packages # Ensure that this code works on both python 2 and python 3 from __future__ import division, print_function, absolute_import, unicode_literals # basic numeric computation: import numpy as np # The package used for creating and manipulating HDF5 files: import h5py # Plotting and visualization: import matplotlib.pyplot as plt # for downloading files: import wget import os # multivariate analysis: from sklearn.cluster import KMeans from sklearn.decomposition import NMF import subprocess import sys def install(package): subprocess.call([sys.executable, "-m", "pip", "install", package]) # Package for downloading online files: # finally import pycroscopy: try: import pycroscopy as px except ImportError: print('pycroscopy not found. Will install with pip.') import pip install('pycroscopy') import pycroscopy as px from pycroscopy.viz import cluster_utils ##################################################################################### # The Data # ======== # # In this example, we will work on a **Band Excitation Piezoresponse Force Microscopy (BE-PFM)** imaging dataset # acquired from advanced atomic force microscopes. In this dataset, a spectra was collected for each position in a two # dimensional grid of spatial locations. Thus, this is a three dimensional dataset that has been flattened to a two # dimensional matrix in accordance with the pycroscopy data format. # # Fortunately, all statistical analysis, machine learning, spectral unmixing algorithms, etc. only accept data that is # formatted in the same manner of [position x spectra] in a two dimensional matrix. # # We will be using an data file available on our GitHub project page by default. You are encouraged # to download this document as a Jupyter Notebook (button at the bottom of the page) and use your own data instead. # When using your own data, you can skip this cell and provide the path to your data using the variable - data_file_path data_file_path = 'temp_um.h5' # download the data file from Github: url = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/BELine_0004.h5' data_file_path = wget.download(url, data_file_path, bar=None) h5_file = h5py.File(data_file_path, mode='r+') print('Contents of data file:') print('----------------------') px.hdf_utils.print_tree(h5_file) print('----------------------') h5_meas_grp = h5_file['Measurement_000'] # Extracting some basic parameters: num_rows = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows') num_cols = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols') # Getting a reference to the main dataset: h5_main = px.PycroDataset(h5_meas_grp['Channel_000/Raw_Data']) px.hdf_utils.write_simple_attrs(h5_main, {'quantity': 'Deflection', 'units': 'V'}) # Extracting the X axis - vector of frequencies h5_spec_vals = px.hdf_utils.get_auxiliary_datasets(h5_main, 'Spectroscopic_Values')[-1] freq_vec = np.squeeze(h5_spec_vals.value) * 1E-3 print('Data currently of shape:', h5_main.shape) x_label = 'Frequency (kHz)' y_label = 'Amplitude (a.u.)' ##################################################################################### # 1. Singular Value Decomposition (SVD) # ===================================== # # SVD is an eigenvector decomposition that is defined statistically, and therefore typically produces # non-physical eigenvectors. Consequently, the interpretation of eigenvectors and abundance maps from # SVD requires care and caution in interpretation. Nonetheless, it is a good method for quickly # visualizing the major trends in the dataset since the resultant eigenvectors are sorted in descending # order of variance or importance. Furthermore, SVD is also very well suited for data cleaning through # the reconstruction of the dataset using only the first N (most significant) components. # # SVD results in three matrices: # # * V - Eigenvectors sorted by variance in descending order # * U - corresponding abundance maps # * S - Variance or importance of each of these components # # Advantage of pycroscopy: # ------------------------ # Notice that we are working with a complex valued dataset. Passing the complex values as is to SVD would result in # complex valued eigenvectors / endmembers as well as abundance maps. Complex valued abundance maps are not physical. # Thus, one would need to restructure the data such that it is real-valued only. # # One solution is to stack the real value followed by the magnitude of the imaginary component before passing to SVD. # After SVD, the real-valued eigenvectors would need to be treated as the concatenation of the real and imaginary # components. So, the eigenvectors would need to be restructured to get back the complex valued eigenvectors. # # **Pycroscopy handles all these data transformations (both for the source dataset and the eigenvectors) # automatically.** In general, pycroscopy handles compound / complex valued datasets everywhere possible # # Furthermore, while it is not discussed in this example, pycroscopy also writes back the results from SVD back to # the same source h5 file including all relevant links to the source dataset and other ancillary datasets decomposer = px.processing.svd_utils.SVD(h5_main, num_components=100) h5_svd_group = decomposer.compute() h5_u = h5_svd_group['U'] h5_v = h5_svd_group['V'] h5_s = h5_svd_group['S'] # Since the two spatial dimensions (x, y) have been collapsed to one, we need to reshape the abundance maps: abun_maps = np.reshape(h5_u[:, :25], (num_rows, num_cols, -1)) px.plot_utils.plot_map_stack(abun_maps, num_comps=9, title='SVD Abundance Maps', reverse_dims=True, color_bar_mode='single', cmap='inferno', title_yoffset=0.95) # Visualize the variance / statistical importance of each component: px.plot_utils.plot_scree(h5_s, title='Note the exponential drop of variance with number of components') # Visualize the eigenvectors: _ = px.plot_utils.plot_complex_spectra(h5_v[:9, :], x_label=x_label, y_label=y_label, title='SVD Eigenvectors', evenly_spaced=False) ##################################################################################### # 2. KMeans Clustering # ==================== # # KMeans clustering is a quick and easy method to determine the types of spectral responses present in the # data. It is not a decomposition method, but a basic clustering method. The user inputs the number of # clusters (sets) to partition the data into. The algorithm proceeds to find the optimal labeling # (ie., assignment of each spectra as belonging to the k<sup>th</sup> set) such that the within-cluster # sum of squares is minimized. # # Set the number of clusters below num_clusters = 4 estimator = px.processing.Cluster(h5_main, KMeans(n_clusters=num_clusters)) h5_kmeans_grp = estimator.compute(h5_main) h5_kmeans_labels = h5_kmeans_grp['Labels'] h5_kmeans_mean_resp = h5_kmeans_grp['Mean_Response'] cluster_utils.plot_cluster_h5_group(h5_kmeans_grp) ##################################################################################### # 3. Non-negative Matrix Factorization (NMF) # =========================================== # # NMF, or non-negative matrix factorization, is a method that is useful towards unmixing of spectral # data. It only works on data with positive real values. It operates by approximate determination of # factors (matrices) W and H, given a matrix V, as shown below # # .. image:: https://upload.wikimedia.org/wikipedia/commons/f/f9/NMF.png # # Unlike SVD and k-Means that can be applied to complex-valued datasets, NMF only works on non-negative datasets. # For illustrative purposes, we will only take the amplitude component of the spectral data num_comps = 4 # get the non-negative portion of the dataset data_mat = np.abs(h5_main) model = NMF(n_components=num_comps, init='random', random_state=0) model.fit(data_mat) fig, axis = plt.subplots(figsize=(5.5, 5)) px.plot_utils.plot_line_family(axis, freq_vec, model.components_, label_prefix='NMF Component #') axis.set_xlabel(x_label, fontsize=12) axis.set_ylabel(y_label, fontsize=12) axis.set_title('NMF Components', fontsize=14) axis.legend(bbox_to_anchor=[1.0, 1.0], fontsize=12) ##################################################################################### # Close and delete the h5_file h5_file.close() os.remove(data_file_path)
43.036364
120
0.705218
1,320
9,468
4.943939
0.372727
0.011033
0.011033
0.009194
0.024517
0.010113
0.010113
0.010113
0.010113
0.010113
0
0.011642
0.13815
9,468
219
121
43.232877
0.788113
0.617448
0
0.059701
0
0
0.178422
0.014171
0
0
0
0
0
1
0.014925
false
0
0.223881
0
0.238806
0.104478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a550e70bdbd276329aa52d0c840e8979d0e9e43
2,113
py
Python
question.py
Lilium765/momoko
c84b37cbe280055fedaac4ee9195d6410b234aba
[ "MIT" ]
null
null
null
question.py
Lilium765/momoko
c84b37cbe280055fedaac4ee9195d6410b234aba
[ "MIT" ]
null
null
null
question.py
Lilium765/momoko
c84b37cbe280055fedaac4ee9195d6410b234aba
[ "MIT" ]
null
null
null
import discord client = discord.Client() # 接続に使用するオブジェクト # 起動時 @client.event async def on_ready(): print('ログイン成功') # メッセージを監視 @client.event async def on_message(message): # 「/box」が頭についたメッセージならオウム返しする if message.content.startswith('/box'): # 文字から「/box」を抜く question = message.content[len('/box'):].strip() # 質問させたいチャンネルのid target_channel_id = getTargetChannelId() # id=0なら質問者にエラー報告DM # idが0以外なら匿名質問する if target_channel_id == 0: dm = await message.author.create_dm() # 質問者へDM作成 await dm.send( 'Sorry, メッセージを送信できませんでした.' 'もう1度試してみてください.\n' '【質問文】' + question) else: # 匿名質問させたいチャンネル target_channel = client.get_channel(target_channel_id) # チャンネルに質問メッセージ送信 await target_channel.send(question) # 匿名質問させたいチャンネルのidを取得 # 指定したカテゴリにある最初のTextチャンネル=質問させたいチャンネルとみなす # ただしカテゴリにチャンネルが無い時は0を返す def getTargetChannelId() -> int: # 質問させたいチャンネル(対象チャンネル) target_channel = {'id': 0, 'position': 99999999} # *********************************************************** # 指定カテゴリ(対象チャンネルが含まれたカテゴリ)の名前 category_id = 711238137598181396 # カテゴリidを指定 target_category_name = client.get_channel(category_id).name # *********************************************************** # 指定したサーバにある全てのTextチャンネル一覧 all_channels = client.get_guild(602423784946925568).text_channels # 全てのTextチャンネルから「指定カテゴリに属する最初のチャンネル」を探す for channel in all_channels: # 指定カテゴリに属する場合だけ対象チャンネル候補とみなす if str(channel.category) == target_category_name: # positionが小さいほうを「より対象チャンネルに近い」として交換 # 初期値はpositionが大きい(99999999)ので,必ず入れ替わる想定 # 繰り返せば,最後にはpositionが最も小さいチャンネルを代入できる if target_channel['position'] > int(channel.position): target_channel['id'] = int(channel.id) target_channel['position'] = int(channel.position) # 最終的に代入されたidを返す return target_channel['id'] # botとしてDiscordに接続(botのトークンを指定) client.run('605042341715378176')
30.623188
69
0.618552
178
2,113
7.185393
0.505618
0.101642
0.070367
0.029711
0.093823
0.060985
0
0
0
0
0
0.047059
0.235684
2,113
68
70
31.073529
0.744892
0.313299
0
0.064516
0
0
0.075193
0
0
0
0
0
0
1
0.032258
false
0
0.032258
0
0.096774
0.032258
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a55afc76d238a2edb1a2adff77422f604912e7b
25,760
py
Python
rpython/annotator/annrpython.py
microvm/pypy-mu
6b03fbe93052d0eb3a4c67152c987c16837b3484
[ "Apache-2.0", "OpenSSL" ]
null
null
null
rpython/annotator/annrpython.py
microvm/pypy-mu
6b03fbe93052d0eb3a4c67152c987c16837b3484
[ "Apache-2.0", "OpenSSL" ]
null
null
null
rpython/annotator/annrpython.py
microvm/pypy-mu
6b03fbe93052d0eb3a4c67152c987c16837b3484
[ "Apache-2.0", "OpenSSL" ]
null
null
null
from __future__ import absolute_import import types from collections import defaultdict from rpython.tool.ansi_print import AnsiLogger from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, gather_error, source_lines) from rpython.flowspace.model import Variable, Constant, checkgraph from rpython.translator import simplify, transform from rpython.annotator import model as annmodel, signature from rpython.annotator.model import ( typeof, s_ImpossibleValue, SomeInstance, intersection, difference) from rpython.annotator.bookkeeper import Bookkeeper from rpython.rtyper.normalizecalls import perform_normalizations log = AnsiLogger("annrpython") class RPythonAnnotator(object): """Block annotator for RPython. See description in doc/translation.txt.""" def __init__(self, translator=None, policy=None, bookkeeper=None): import rpython.rtyper.extfuncregistry # has side effects if translator is None: # interface for tests from rpython.translator.translator import TranslationContext translator = TranslationContext() translator.annotator = self self.translator = translator self.pendingblocks = {} # map {block: graph-containing-it} self.annotated = {} # set of blocks already seen self.added_blocks = None # see processblock() below self.links_followed = {} # set of links that have ever been followed self.notify = {} # {block: {positions-to-reflow-from-when-done}} self.fixed_graphs = {} # set of graphs not to annotate again self.blocked_blocks = {} # set of {blocked_block: (graph, index)} # --- the following information is recorded for debugging --- self.blocked_graphs = {} # set of graphs that have blocked blocks # --- end of debugging information --- self.frozen = False if policy is None: from rpython.annotator.policy import AnnotatorPolicy self.policy = AnnotatorPolicy() else: self.policy = policy if bookkeeper is None: bookkeeper = Bookkeeper(self) self.bookkeeper = bookkeeper def __getstate__(self): attrs = """translator pendingblocks annotated links_followed notify bookkeeper frozen policy added_blocks""".split() ret = self.__dict__.copy() for key, value in ret.items(): if key not in attrs: assert type(value) is dict, ( "%r is not dict. please update %s.__getstate__" % (key, self.__class__.__name__)) ret[key] = {} return ret #___ convenience high-level interface __________________ def build_types(self, function, input_arg_types, complete_now=True, main_entry_point=False): """Recursively build annotations about the specific entry point.""" assert isinstance(function, types.FunctionType), "fix that!" from rpython.annotator.policy import AnnotatorPolicy policy = AnnotatorPolicy() # make input arguments and set their type args_s = [self.typeannotation(t) for t in input_arg_types] # XXX hack annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy) if main_entry_point: self.translator.entry_point_graph = flowgraph return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now) def get_call_parameters(self, function, args_s, policy): desc = self.bookkeeper.getdesc(function) prevpolicy = self.policy self.policy = policy self.bookkeeper.enter(None) try: return desc.get_call_parameters(args_s) finally: self.bookkeeper.leave() self.policy = prevpolicy def annotate_helper(self, function, args_s, policy=None): if policy is None: from rpython.annotator.policy import AnnotatorPolicy policy = AnnotatorPolicy() # XXX hack annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) graph, inputcells = self.get_call_parameters(function, args_s, policy) self.build_graph_types(graph, inputcells, complete_now=False) self.complete_helpers(policy) return graph def complete_helpers(self, policy): saved = self.policy, self.added_blocks self.policy = policy try: self.added_blocks = {} self.complete() # invoke annotation simplifications for the new blocks self.simplify(block_subset=self.added_blocks) finally: self.policy, self.added_blocks = saved def build_graph_types(self, flowgraph, inputcells, complete_now=True): checkgraph(flowgraph) nbarg = len(flowgraph.getargs()) assert len(inputcells) == nbarg # wrong number of args # register the entry point self.addpendinggraph(flowgraph, inputcells) # recursively proceed until no more pending block is left if complete_now: self.complete() return self.annotation(flowgraph.getreturnvar()) def gettype(self, variable): """Return the known type of a control flow graph variable, defaulting to 'object'.""" if isinstance(variable, Constant): return type(variable.value) elif isinstance(variable, Variable): s_variable = variable.annotation if s_variable: return s_variable.knowntype else: return object else: raise TypeError("Variable or Constant instance expected, " "got %r" % (variable,)) def getuserclassdefinitions(self): """Return a list of ClassDefs.""" return self.bookkeeper.classdefs #___ medium-level interface ____________________________ def addpendinggraph(self, flowgraph, inputcells): self.addpendingblock(flowgraph, flowgraph.startblock, inputcells) def addpendingblock(self, graph, block, cells): """Register an entry point into block with the given input cells.""" if graph in self.fixed_graphs: # special case for annotating/rtyping in several phases: calling # a graph that has already been rtyped. Safety-check the new # annotations that are passed in, and don't annotate the old # graph -- it's already low-level operations! for a, s_newarg in zip(block.inputargs, cells): s_oldarg = self.binding(a) assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg else: assert not self.frozen if block not in self.annotated: self.bindinputargs(graph, block, cells) else: self.mergeinputargs(graph, block, cells) if not self.annotated[block]: self.pendingblocks[block] = graph def complete_pending_blocks(self): while self.pendingblocks: block, graph = self.pendingblocks.popitem() self.processblock(graph, block) def complete(self): """Process pending blocks until none is left.""" while True: self.complete_pending_blocks() self.policy.no_more_blocks_to_annotate(self) if not self.pendingblocks: break # finished # make sure that the return variables of all graphs is annotated if self.added_blocks is not None: newgraphs = [self.annotated[block] for block in self.added_blocks] newgraphs = dict.fromkeys(newgraphs) got_blocked_blocks = False in newgraphs else: newgraphs = self.translator.graphs #all of them got_blocked_blocks = False in self.annotated.values() if got_blocked_blocks: for graph in self.blocked_graphs.values(): self.blocked_graphs[graph] = True blocked_blocks = [block for block, done in self.annotated.items() if done is False] assert len(blocked_blocks) == len(self.blocked_blocks) text = format_blocked_annotation_error(self, self.blocked_blocks) #raise SystemExit() raise annmodel.AnnotatorError(text) for graph in newgraphs: v = graph.getreturnvar() if v.annotation is None: self.setbinding(v, s_ImpossibleValue) def validate(self): """Check that the annotation results are valid""" self.bookkeeper.check_no_flags_on_instances() def annotation(self, arg): "Gives the SomeValue corresponding to the given Variable or Constant." if isinstance(arg, Variable): return arg.annotation elif isinstance(arg, Constant): return self.bookkeeper.immutablevalue(arg.value) else: raise TypeError('Variable or Constant expected, got %r' % (arg,)) def binding(self, arg): "Gives the SomeValue corresponding to the given Variable or Constant." s_arg = self.annotation(arg) if s_arg is None: raise KeyError return s_arg def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) def setbinding(self, arg, s_value): s_old = arg.annotation if s_old is not None: if not s_value.contains(s_old): log.WARNING("%s does not contain %s" % (s_value, s_old)) log.WARNING("%s" % annmodel.unionof(s_value, s_old)) assert False arg.annotation = s_value def warning(self, msg, pos=None): if pos is None: try: pos = self.bookkeeper.position_key except AttributeError: pos = '?' if pos != '?': pos = self.whereami(pos) log.WARNING("%s/ %s" % (pos, msg)) #___ interface for annotator.bookkeeper _______ def recursivecall(self, graph, whence, inputcells): if isinstance(whence, tuple): parent_graph, parent_block, parent_index = whence tag = parent_block, parent_index self.translator.update_call_graph(parent_graph, graph, tag) # self.notify[graph.returnblock] is a dictionary of call # points to this func which triggers a reflow whenever the # return block of this graph has been analysed. callpositions = self.notify.setdefault(graph.returnblock, {}) if whence is not None: if callable(whence): def callback(): whence(self, graph) else: callback = whence callpositions[callback] = True # generalize the function's input arguments self.addpendingblock(graph, graph.startblock, inputcells) # get the (current) return value v = graph.getreturnvar() try: return self.binding(v) except KeyError: # the function didn't reach any return statement so far. # (some functions actually never do, they always raise exceptions) return s_ImpossibleValue def reflowfromposition(self, position_key): graph, block, index = position_key self.reflowpendingblock(graph, block) def call_sites(self): newblocks = self.added_blocks if newblocks is None: newblocks = self.annotated # all of them for block in newblocks: for op in block.operations: if op.opname in ('simple_call', 'call_args'): yield op # some blocks are partially annotated if op.result.annotation is None: break # ignore the unannotated part #___ simplification (should be moved elsewhere?) _______ def simplify(self, block_subset=None, extra_passes=None): # Generic simplifications transform.transform_graph(self, block_subset=block_subset, extra_passes=extra_passes) if block_subset is None: graphs = self.translator.graphs else: graphs = {} for block in block_subset: graph = self.annotated.get(block) if graph: graphs[graph] = True for graph in graphs: simplify.eliminate_empty_blocks(graph) self.bookkeeper.compute_at_fixpoint() if block_subset is None: perform_normalizations(self) #___ flowing annotations in blocks _____________________ def processblock(self, graph, block): # Important: this is not called recursively. # self.flowin() can only issue calls to self.addpendingblock(). # The analysis of a block can be in three states: # * block not in self.annotated: # never seen the block. # * self.annotated[block] == False: # the input variables of the block have bindings but we # still have to consider all the operations in the block. # * self.annotated[block] == graph-containing-block: # analysis done (at least until we find we must generalize the # input variables). #print '* processblock', block, cells self.annotated[block] = graph if block in self.blocked_blocks: del self.blocked_blocks[block] try: self.flowin(graph, block) except BlockedInference as e: self.annotated[block] = False # failed, hopefully temporarily self.blocked_blocks[block] = (graph, e.opindex) except Exception as e: # hack for debug tools only if not hasattr(e, '__annotator_block'): setattr(e, '__annotator_block', block) raise # The dict 'added_blocks' is used by rpython.annlowlevel to # detect which are the new blocks that annotating an additional # small helper creates. if self.added_blocks is not None: self.added_blocks[block] = True def reflowpendingblock(self, graph, block): assert not self.frozen assert graph not in self.fixed_graphs self.pendingblocks[block] = graph assert block in self.annotated self.annotated[block] = False # must re-flow self.blocked_blocks[block] = (graph, None) def bindinputargs(self, graph, block, inputcells): # Create the initial bindings for the input args of a block. assert len(block.inputargs) == len(inputcells) for a, cell in zip(block.inputargs, inputcells): self.setbinding(a, cell) self.annotated[block] = False # must flowin. self.blocked_blocks[block] = (graph, None) def mergeinputargs(self, graph, block, inputcells): # Merge the new 'cells' with each of the block's existing input # variables. oldcells = [self.binding(a) for a in block.inputargs] try: unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)] except annmodel.UnionError as e: # Add source code to the UnionError e.source = '\n'.join(source_lines(graph, block, None, long=True)) raise # if the merged cells changed, we must redo the analysis if unions != oldcells: self.bindinputargs(graph, block, unions) def apply_renaming(self, s_out, renaming): if hasattr(s_out, 'is_type_of'): renamed_is_type_of = [] for v in s_out.is_type_of: renamed_is_type_of += renaming[v] assert s_out.knowntype is type newcell = typeof(renamed_is_type_of) if s_out.is_constant(): newcell.const = s_out.const s_out = newcell if hasattr(s_out, 'knowntypedata'): renamed_knowntypedata = {} for value, constraints in s_out.knowntypedata.items(): renamed_knowntypedata[value] = {} for v, s in constraints.items(): new_vs = renaming.get(v, []) for new_v in new_vs: renamed_knowntypedata[value][new_v] = s assert isinstance(s_out, annmodel.SomeBool) newcell = annmodel.SomeBool() if s_out.is_constant(): newcell.const = s_out.const s_out = newcell s_out.set_knowntypedata(renamed_knowntypedata) return s_out def whereami(self, position_key): graph, block, i = position_key blk = "" if block: at = block.at() if at: blk = " block"+at opid="" if i is not None: opid = " op=%d" % i return repr(graph) + blk + opid def flowin(self, graph, block): try: i = 0 while i < len(block.operations): op = block.operations[i] with self.bookkeeper.at_position((graph, block, i)): new_ops = op.transform(self) if new_ops is not None: block.operations[i:i+1] = new_ops if not new_ops: continue new_ops[-1].result = op.result op = new_ops[0] self.consider_op(op) i += 1 except BlockedInference as e: if e.op is block.raising_op: # this is the case where the last operation of the block will # always raise an exception which is immediately caught by # an exception handler. We then only follow the exceptional # branches. exits = [link for link in block.exits if link.exitcase is not None] elif e.op.opname in ('simple_call', 'call_args', 'next'): # XXX warning, keep the name of the call operations in sync # with the flow object space. These are the operations for # which it is fine to always raise an exception. We then # swallow the BlockedInference and that's it. # About 'next': see test_annotate_iter_empty_container(). return else: # other cases are problematic (but will hopefully be solved # later by reflowing). Throw the BlockedInference up to # processblock(). e.opindex = i raise except annmodel.HarmlesslyBlocked: return except annmodel.AnnotatorError as e: # note that UnionError is a subclass e.source = gather_error(self, graph, block, i) raise else: # dead code removal: don't follow all exits if the exitswitch # is known exits = block.exits if isinstance(block.exitswitch, Variable): s_exitswitch = self.binding(block.exitswitch) if s_exitswitch.is_constant(): exits = [link for link in exits if link.exitcase == s_exitswitch.const] if block.canraise: op = block.raising_op s_exception = self.get_exception(op) for link in exits: case = link.exitcase if case is None: self.follow_link(graph, link, {}) continue if s_exception == s_ImpossibleValue: break s_case = SomeInstance(self.bookkeeper.getuniqueclassdef(case)) s_matching_exc = intersection(s_exception, s_case) if s_matching_exc != s_ImpossibleValue: self.follow_raise_link(graph, link, s_matching_exc) s_exception = difference(s_exception, s_case) else: if isinstance(block.exitswitch, Variable): knowntypedata = getattr( block.exitswitch.annotation, "knowntypedata", {}) else: knowntypedata = {} for link in exits: constraints = knowntypedata.get(link.exitcase, {}) self.follow_link(graph, link, constraints) if block in self.notify: # reflow from certain positions when this block is done for callback in self.notify[block]: if isinstance(callback, tuple): self.reflowfromposition(callback) # callback is a position else: callback() def follow_link(self, graph, link, constraints): assert not (isinstance(link.exitcase, (types.ClassType, type)) and issubclass(link.exitcase, BaseException)) ignore_link = False inputs_s = [] renaming = defaultdict(list) for v_out, v_input in zip(link.args, link.target.inputargs): renaming[v_out].append(v_input) for v_out in link.args: s_out = self.annotation(v_out) if v_out in constraints: s_constraint = constraints[v_out] s_out = pair(s_out, s_constraint).improve() # ignore links that try to pass impossible values if s_out == s_ImpossibleValue: ignore_link = True s_out = self.apply_renaming(s_out, renaming) inputs_s.append(s_out) if ignore_link: return self.links_followed[link] = True self.addpendingblock(graph, link.target, inputs_s) def follow_raise_link(self, graph, link, s_last_exc_value): v_last_exc_type = link.last_exception v_last_exc_value = link.last_exc_value assert (isinstance(link.exitcase, (types.ClassType, type)) and issubclass(link.exitcase, BaseException)) assert v_last_exc_type and v_last_exc_value if isinstance(v_last_exc_value, Variable): self.setbinding(v_last_exc_value, s_last_exc_value) if isinstance(v_last_exc_type, Variable): self.setbinding(v_last_exc_type, typeof([v_last_exc_value])) inputs_s = [] renaming = defaultdict(list) for v_out, v_input in zip(link.args, link.target.inputargs): renaming[v_out].append(v_input) for v_out, v_input in zip(link.args, link.target.inputargs): if v_out == v_last_exc_type: s_out = typeof(renaming[v_last_exc_value]) if isinstance(v_last_exc_type, Constant): s_out.const = v_last_exc_type.value elif v_last_exc_type.annotation.is_constant(): s_out.const = v_last_exc_type.annotation.const inputs_s.append(s_out) else: s_out = self.annotation(v_out) s_out = self.apply_renaming(s_out, renaming) inputs_s.append(s_out) self.links_followed[link] = True self.addpendingblock(graph, link.target, inputs_s) #___ creating the annotations based on operations ______ def consider_op(self, op): # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the # more general results invariant: e.g. if SomeImpossibleValue enters is_ # is_(SomeImpossibleValue, None) -> SomeBool # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... # boom -- in the assert of setbinding() for arg in op.args: if isinstance(self.annotation(arg), annmodel.SomeImpossibleValue): raise BlockedInference(self, op, -1) resultcell = op.consider(self) if resultcell is None: resultcell = s_ImpossibleValue elif resultcell == s_ImpossibleValue: raise BlockedInference(self, op, -1) # the operation cannot succeed assert isinstance(resultcell, annmodel.SomeObject) assert isinstance(op.result, Variable) self.setbinding(op.result, resultcell) # bind resultcell to op.result def get_exception(self, operation): """ Return the annotation for all exceptions that `operation` may raise. """ can_only_throw = operation.get_can_only_throw(self) if can_only_throw is None: return SomeInstance(self.bookkeeper.getuniqueclassdef(Exception)) else: return self.bookkeeper.new_exception(can_only_throw) class BlockedInference(Exception): """This exception signals the type inference engine that the situation is currently blocked, and that it should try to progress elsewhere.""" def __init__(self, annotator, op, opindex): self.annotator = annotator try: self.break_at = annotator.bookkeeper.position_key except AttributeError: self.break_at = None self.op = op self.opindex = opindex def __repr__(self): if not self.break_at: break_at = "?" else: break_at = self.annotator.whereami(self.break_at) return "<BlockedInference break_at %s [%s]>" %(break_at, self.op) __str__ = __repr__
40.312989
85
0.604891
2,915
25,760
5.160206
0.172213
0.007978
0.007978
0.00718
0.177702
0.125981
0.11428
0.102579
0.087488
0.076851
0
0.000628
0.319876
25,760
638
86
40.376176
0.857934
0.181211
0
0.237581
0
0
0.027592
0
0
0
0
0
0.038877
1
0.079914
false
0.00432
0.036717
0.00216
0.172786
0.00216
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5657a9ba609c4b80f71648ff43f36e553c66c2
26,287
py
Python
azure-devops/azext_devops/vstsCompressed/work_item_tracking_process/v4_0/models/models.py
vijayraavi/azure-devops-cli-extension
88f1420c5815cb09bea15b050f4c553e0f326dad
[ "MIT" ]
null
null
null
azure-devops/azext_devops/vstsCompressed/work_item_tracking_process/v4_0/models/models.py
vijayraavi/azure-devops-cli-extension
88f1420c5815cb09bea15b050f4c553e0f326dad
[ "MIT" ]
37
2020-04-27T07:45:19.000Z
2021-04-05T07:27:15.000Z
azure-devops/azext_devops/vstsCompressed/work_item_tracking_process/v4_0/models/models.py
vijayraavi/azure-devops-cli-extension
88f1420c5815cb09bea15b050f4c553e0f326dad
[ "MIT" ]
null
null
null
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest.serialization import Model class Control(Model): """Control. :param contribution: Contribution for the control. :type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>` :param control_type: Type of the control. :type control_type: str :param height: Height of the control, for html controls. :type height: int :param id: The id for the layout node. :type id: str :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be only set by the combiner. :type inherited: bool :param is_contribution: A value indicating if the layout node is contribution or not. :type is_contribution: bool :param label: Label for the field :type label: str :param metadata: Inner text of the control. :type metadata: str :param order: :type order: int :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool :param read_only: A value indicating if the control is readonly. :type read_only: bool :param visible: A value indicating if the control should be hidden or not. :type visible: bool :param watermark: Watermark text for the textbox. :type watermark: str """ _attribute_map = { 'contribution': {'key': 'contribution', 'type': 'WitContribution'}, 'control_type': {'key': 'controlType', 'type': 'str'}, 'height': {'key': 'height', 'type': 'int'}, 'id': {'key': 'id', 'type': 'str'}, 'inherited': {'key': 'inherited', 'type': 'bool'}, 'is_contribution': {'key': 'isContribution', 'type': 'bool'}, 'label': {'key': 'label', 'type': 'str'}, 'metadata': {'key': 'metadata', 'type': 'str'}, 'order': {'key': 'order', 'type': 'int'}, 'overridden': {'key': 'overridden', 'type': 'bool'}, 'read_only': {'key': 'readOnly', 'type': 'bool'}, 'visible': {'key': 'visible', 'type': 'bool'}, 'watermark': {'key': 'watermark', 'type': 'str'} } def __init__(self, contribution=None, control_type=None, height=None, id=None, inherited=None, is_contribution=None, label=None, metadata=None, order=None, overridden=None, read_only=None, visible=None, watermark=None): super(Control, self).__init__() self.contribution = contribution self.control_type = control_type self.height = height self.id = id self.inherited = inherited self.is_contribution = is_contribution self.label = label self.metadata = metadata self.order = order self.overridden = overridden self.read_only = read_only self.visible = visible self.watermark = watermark class CreateProcessModel(Model): """CreateProcessModel. :param description: :type description: str :param name: :type name: str :param parent_process_type_id: :type parent_process_type_id: str :param reference_name: :type reference_name: str """ _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'}, 'reference_name': {'key': 'referenceName', 'type': 'str'} } def __init__(self, description=None, name=None, parent_process_type_id=None, reference_name=None): super(CreateProcessModel, self).__init__() self.description = description self.name = name self.parent_process_type_id = parent_process_type_id self.reference_name = reference_name class Extension(Model): """Extension. :param id: :type id: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'} } def __init__(self, id=None): super(Extension, self).__init__() self.id = id class FieldModel(Model): """FieldModel. :param description: :type description: str :param id: :type id: str :param is_identity: :type is_identity: bool :param name: :type name: str :param type: :type type: object :param url: :type url: str """ _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'is_identity': {'key': 'isIdentity', 'type': 'bool'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'object'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, description=None, id=None, is_identity=None, name=None, type=None, url=None): super(FieldModel, self).__init__() self.description = description self.id = id self.is_identity = is_identity self.name = name self.type = type self.url = url class FieldRuleModel(Model): """FieldRuleModel. :param actions: :type actions: list of :class:`RuleActionModel <work-item-tracking.v4_0.models.RuleActionModel>` :param conditions: :type conditions: list of :class:`RuleConditionModel <work-item-tracking.v4_0.models.RuleConditionModel>` :param friendly_name: :type friendly_name: str :param id: :type id: str :param is_disabled: :type is_disabled: bool :param is_system: :type is_system: bool """ _attribute_map = { 'actions': {'key': 'actions', 'type': '[RuleActionModel]'}, 'conditions': {'key': 'conditions', 'type': '[RuleConditionModel]'}, 'friendly_name': {'key': 'friendlyName', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'is_disabled': {'key': 'isDisabled', 'type': 'bool'}, 'is_system': {'key': 'isSystem', 'type': 'bool'} } def __init__(self, actions=None, conditions=None, friendly_name=None, id=None, is_disabled=None, is_system=None): super(FieldRuleModel, self).__init__() self.actions = actions self.conditions = conditions self.friendly_name = friendly_name self.id = id self.is_disabled = is_disabled self.is_system = is_system class FormLayout(Model): """FormLayout. :param extensions: Gets and sets extensions list :type extensions: list of :class:`Extension <work-item-tracking.v4_0.models.Extension>` :param pages: Top level tabs of the layout. :type pages: list of :class:`Page <work-item-tracking.v4_0.models.Page>` :param system_controls: Headers controls of the layout. :type system_controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>` """ _attribute_map = { 'extensions': {'key': 'extensions', 'type': '[Extension]'}, 'pages': {'key': 'pages', 'type': '[Page]'}, 'system_controls': {'key': 'systemControls', 'type': '[Control]'} } def __init__(self, extensions=None, pages=None, system_controls=None): super(FormLayout, self).__init__() self.extensions = extensions self.pages = pages self.system_controls = system_controls class Group(Model): """Group. :param contribution: Contribution for the group. :type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>` :param controls: Controls to be put in the group. :type controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>` :param height: The height for the contribution. :type height: int :param id: The id for the layout node. :type id: str :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be only set by the combiner. :type inherited: bool :param is_contribution: A value indicating if the layout node is contribution are not. :type is_contribution: bool :param label: Label for the group. :type label: str :param order: Order in which the group should appear in the section. :type order: int :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool :param visible: A value indicating if the group should be hidden or not. :type visible: bool """ _attribute_map = { 'contribution': {'key': 'contribution', 'type': 'WitContribution'}, 'controls': {'key': 'controls', 'type': '[Control]'}, 'height': {'key': 'height', 'type': 'int'}, 'id': {'key': 'id', 'type': 'str'}, 'inherited': {'key': 'inherited', 'type': 'bool'}, 'is_contribution': {'key': 'isContribution', 'type': 'bool'}, 'label': {'key': 'label', 'type': 'str'}, 'order': {'key': 'order', 'type': 'int'}, 'overridden': {'key': 'overridden', 'type': 'bool'}, 'visible': {'key': 'visible', 'type': 'bool'} } def __init__(self, contribution=None, controls=None, height=None, id=None, inherited=None, is_contribution=None, label=None, order=None, overridden=None, visible=None): super(Group, self).__init__() self.contribution = contribution self.controls = controls self.height = height self.id = id self.inherited = inherited self.is_contribution = is_contribution self.label = label self.order = order self.overridden = overridden self.visible = visible class Page(Model): """Page. :param contribution: Contribution for the page. :type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>` :param id: The id for the layout node. :type id: str :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be only set by the combiner. :type inherited: bool :param is_contribution: A value indicating if the layout node is contribution are not. :type is_contribution: bool :param label: The label for the page. :type label: str :param locked: A value indicating whether any user operations are permitted on this page and the contents of this page :type locked: bool :param order: Order in which the page should appear in the layout. :type order: int :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool :param page_type: The icon for the page. :type page_type: object :param sections: The sections of the page. :type sections: list of :class:`Section <work-item-tracking.v4_0.models.Section>` :param visible: A value indicating if the page should be hidden or not. :type visible: bool """ _attribute_map = { 'contribution': {'key': 'contribution', 'type': 'WitContribution'}, 'id': {'key': 'id', 'type': 'str'}, 'inherited': {'key': 'inherited', 'type': 'bool'}, 'is_contribution': {'key': 'isContribution', 'type': 'bool'}, 'label': {'key': 'label', 'type': 'str'}, 'locked': {'key': 'locked', 'type': 'bool'}, 'order': {'key': 'order', 'type': 'int'}, 'overridden': {'key': 'overridden', 'type': 'bool'}, 'page_type': {'key': 'pageType', 'type': 'object'}, 'sections': {'key': 'sections', 'type': '[Section]'}, 'visible': {'key': 'visible', 'type': 'bool'} } def __init__(self, contribution=None, id=None, inherited=None, is_contribution=None, label=None, locked=None, order=None, overridden=None, page_type=None, sections=None, visible=None): super(Page, self).__init__() self.contribution = contribution self.id = id self.inherited = inherited self.is_contribution = is_contribution self.label = label self.locked = locked self.order = order self.overridden = overridden self.page_type = page_type self.sections = sections self.visible = visible class ProcessModel(Model): """ProcessModel. :param description: :type description: str :param name: :type name: str :param projects: :type projects: list of :class:`ProjectReference <work-item-tracking.v4_0.models.ProjectReference>` :param properties: :type properties: :class:`ProcessProperties <work-item-tracking.v4_0.models.ProcessProperties>` :param reference_name: :type reference_name: str :param type_id: :type type_id: str """ _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'projects': {'key': 'projects', 'type': '[ProjectReference]'}, 'properties': {'key': 'properties', 'type': 'ProcessProperties'}, 'reference_name': {'key': 'referenceName', 'type': 'str'}, 'type_id': {'key': 'typeId', 'type': 'str'} } def __init__(self, description=None, name=None, projects=None, properties=None, reference_name=None, type_id=None): super(ProcessModel, self).__init__() self.description = description self.name = name self.projects = projects self.properties = properties self.reference_name = reference_name self.type_id = type_id class ProcessProperties(Model): """ProcessProperties. :param class_: :type class_: object :param is_default: :type is_default: bool :param is_enabled: :type is_enabled: bool :param parent_process_type_id: :type parent_process_type_id: str :param version: :type version: str """ _attribute_map = { 'class_': {'key': 'class', 'type': 'object'}, 'is_default': {'key': 'isDefault', 'type': 'bool'}, 'is_enabled': {'key': 'isEnabled', 'type': 'bool'}, 'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'}, 'version': {'key': 'version', 'type': 'str'} } def __init__(self, class_=None, is_default=None, is_enabled=None, parent_process_type_id=None, version=None): super(ProcessProperties, self).__init__() self.class_ = class_ self.is_default = is_default self.is_enabled = is_enabled self.parent_process_type_id = parent_process_type_id self.version = version class ProjectReference(Model): """ProjectReference. :param description: :type description: str :param id: :type id: str :param name: :type name: str :param url: :type url: str """ _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, description=None, id=None, name=None, url=None): super(ProjectReference, self).__init__() self.description = description self.id = id self.name = name self.url = url class RuleActionModel(Model): """RuleActionModel. :param action_type: :type action_type: str :param target_field: :type target_field: str :param value: :type value: str """ _attribute_map = { 'action_type': {'key': 'actionType', 'type': 'str'}, 'target_field': {'key': 'targetField', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'} } def __init__(self, action_type=None, target_field=None, value=None): super(RuleActionModel, self).__init__() self.action_type = action_type self.target_field = target_field self.value = value class RuleConditionModel(Model): """RuleConditionModel. :param condition_type: :type condition_type: str :param field: :type field: str :param value: :type value: str """ _attribute_map = { 'condition_type': {'key': 'conditionType', 'type': 'str'}, 'field': {'key': 'field', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'} } def __init__(self, condition_type=None, field=None, value=None): super(RuleConditionModel, self).__init__() self.condition_type = condition_type self.field = field self.value = value class Section(Model): """Section. :param groups: :type groups: list of :class:`Group <work-item-tracking.v4_0.models.Group>` :param id: The id for the layout node. :type id: str :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool """ _attribute_map = { 'groups': {'key': 'groups', 'type': '[Group]'}, 'id': {'key': 'id', 'type': 'str'}, 'overridden': {'key': 'overridden', 'type': 'bool'} } def __init__(self, groups=None, id=None, overridden=None): super(Section, self).__init__() self.groups = groups self.id = id self.overridden = overridden class UpdateProcessModel(Model): """UpdateProcessModel. :param description: :type description: str :param is_default: :type is_default: bool :param is_enabled: :type is_enabled: bool :param name: :type name: str """ _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'is_default': {'key': 'isDefault', 'type': 'bool'}, 'is_enabled': {'key': 'isEnabled', 'type': 'bool'}, 'name': {'key': 'name', 'type': 'str'} } def __init__(self, description=None, is_default=None, is_enabled=None, name=None): super(UpdateProcessModel, self).__init__() self.description = description self.is_default = is_default self.is_enabled = is_enabled self.name = name class WitContribution(Model): """WitContribution. :param contribution_id: The id for the contribution. :type contribution_id: str :param height: The height for the contribution. :type height: int :param inputs: A dictionary holding key value pairs for contribution inputs. :type inputs: dict :param show_on_deleted_work_item: A value indicating if the contribution should be show on deleted workItem. :type show_on_deleted_work_item: bool """ _attribute_map = { 'contribution_id': {'key': 'contributionId', 'type': 'str'}, 'height': {'key': 'height', 'type': 'int'}, 'inputs': {'key': 'inputs', 'type': '{object}'}, 'show_on_deleted_work_item': {'key': 'showOnDeletedWorkItem', 'type': 'bool'} } def __init__(self, contribution_id=None, height=None, inputs=None, show_on_deleted_work_item=None): super(WitContribution, self).__init__() self.contribution_id = contribution_id self.height = height self.inputs = inputs self.show_on_deleted_work_item = show_on_deleted_work_item class WorkItemBehavior(Model): """WorkItemBehavior. :param abstract: :type abstract: bool :param color: :type color: str :param description: :type description: str :param fields: :type fields: list of :class:`WorkItemBehaviorField <work-item-tracking.v4_0.models.WorkItemBehaviorField>` :param id: :type id: str :param inherits: :type inherits: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>` :param name: :type name: str :param overriden: :type overriden: bool :param rank: :type rank: int :param url: :type url: str """ _attribute_map = { 'abstract': {'key': 'abstract', 'type': 'bool'}, 'color': {'key': 'color', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'fields': {'key': 'fields', 'type': '[WorkItemBehaviorField]'}, 'id': {'key': 'id', 'type': 'str'}, 'inherits': {'key': 'inherits', 'type': 'WorkItemBehaviorReference'}, 'name': {'key': 'name', 'type': 'str'}, 'overriden': {'key': 'overriden', 'type': 'bool'}, 'rank': {'key': 'rank', 'type': 'int'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, abstract=None, color=None, description=None, fields=None, id=None, inherits=None, name=None, overriden=None, rank=None, url=None): super(WorkItemBehavior, self).__init__() self.abstract = abstract self.color = color self.description = description self.fields = fields self.id = id self.inherits = inherits self.name = name self.overriden = overriden self.rank = rank self.url = url class WorkItemBehaviorField(Model): """WorkItemBehaviorField. :param behavior_field_id: :type behavior_field_id: str :param id: :type id: str :param url: :type url: str """ _attribute_map = { 'behavior_field_id': {'key': 'behaviorFieldId', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, behavior_field_id=None, id=None, url=None): super(WorkItemBehaviorField, self).__init__() self.behavior_field_id = behavior_field_id self.id = id self.url = url class WorkItemBehaviorReference(Model): """WorkItemBehaviorReference. :param id: :type id: str :param url: :type url: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, id=None, url=None): super(WorkItemBehaviorReference, self).__init__() self.id = id self.url = url class WorkItemStateResultModel(Model): """WorkItemStateResultModel. :param color: :type color: str :param hidden: :type hidden: bool :param id: :type id: str :param name: :type name: str :param order: :type order: int :param state_category: :type state_category: str :param url: :type url: str """ _attribute_map = { 'color': {'key': 'color', 'type': 'str'}, 'hidden': {'key': 'hidden', 'type': 'bool'}, 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'order': {'key': 'order', 'type': 'int'}, 'state_category': {'key': 'stateCategory', 'type': 'str'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, color=None, hidden=None, id=None, name=None, order=None, state_category=None, url=None): super(WorkItemStateResultModel, self).__init__() self.color = color self.hidden = hidden self.id = id self.name = name self.order = order self.state_category = state_category self.url = url class WorkItemTypeBehavior(Model): """WorkItemTypeBehavior. :param behavior: :type behavior: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>` :param is_default: :type is_default: bool :param url: :type url: str """ _attribute_map = { 'behavior': {'key': 'behavior', 'type': 'WorkItemBehaviorReference'}, 'is_default': {'key': 'isDefault', 'type': 'bool'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, behavior=None, is_default=None, url=None): super(WorkItemTypeBehavior, self).__init__() self.behavior = behavior self.is_default = is_default self.url = url class WorkItemTypeModel(Model): """WorkItemTypeModel. :param behaviors: :type behaviors: list of :class:`WorkItemTypeBehavior <work-item-tracking.v4_0.models.WorkItemTypeBehavior>` :param class_: :type class_: object :param color: :type color: str :param description: :type description: str :param icon: :type icon: str :param id: :type id: str :param inherits: Parent WIT Id/Internal ReferenceName that it inherits from :type inherits: str :param is_disabled: :type is_disabled: bool :param layout: :type layout: :class:`FormLayout <work-item-tracking.v4_0.models.FormLayout>` :param name: :type name: str :param states: :type states: list of :class:`WorkItemStateResultModel <work-item-tracking.v4_0.models.WorkItemStateResultModel>` :param url: :type url: str """ _attribute_map = { 'behaviors': {'key': 'behaviors', 'type': '[WorkItemTypeBehavior]'}, 'class_': {'key': 'class', 'type': 'object'}, 'color': {'key': 'color', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'icon': {'key': 'icon', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'inherits': {'key': 'inherits', 'type': 'str'}, 'is_disabled': {'key': 'isDisabled', 'type': 'bool'}, 'layout': {'key': 'layout', 'type': 'FormLayout'}, 'name': {'key': 'name', 'type': 'str'}, 'states': {'key': 'states', 'type': '[WorkItemStateResultModel]'}, 'url': {'key': 'url', 'type': 'str'} } def __init__(self, behaviors=None, class_=None, color=None, description=None, icon=None, id=None, inherits=None, is_disabled=None, layout=None, name=None, states=None, url=None): super(WorkItemTypeModel, self).__init__() self.behaviors = behaviors self.class_ = class_ self.color = color self.description = description self.icon = icon self.id = id self.inherits = inherits self.is_disabled = is_disabled self.layout = layout self.name = name self.states = states self.url = url
33.316857
223
0.608666
2,966
26,287
5.234997
0.070128
0.029755
0.015586
0.022026
0.576351
0.514523
0.4516
0.39222
0.360726
0.321569
0
0.00189
0.235021
26,287
788
224
33.359137
0.770252
0.345494
0
0.498599
0
0
0.211704
0.013192
0
0
0
0
0
1
0.061625
false
0
0.002801
0
0.187675
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a56af27e9c2f89056b41302d904e7b48a70d0c4
1,379
py
Python
responder.py
ziggyzacks/pyrecs
06e760aad4b49a62322f1d46660c52c81eeb1b11
[ "MIT" ]
2
2018-03-21T17:55:57.000Z
2018-03-21T17:56:00.000Z
responder.py
ziggyzacks/pyrecs
06e760aad4b49a62322f1d46660c52c81eeb1b11
[ "MIT" ]
null
null
null
responder.py
ziggyzacks/pyrecs
06e760aad4b49a62322f1d46660c52c81eeb1b11
[ "MIT" ]
null
null
null
import abc from utils import LogMixin class Reponse(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod def redis(self): """ redis connection """ return @abc.abstractmethod def fetch(self, ids): """ hydrate relevant ids with data """ return class Movies(Reponse, LogMixin): DEFAULT_FIELDS = ['title', 'year', 'genres'] def __init__(self, **kwargs): super().__init__() for key, value in kwargs.items(): setattr(self, key, value) def fetch(self, movies, fields=None, from_index=False): """ hydrates class ids with metadata, return redis pipeline that must be executed """ if fields is None: fields = Movies.DEFAULT_FIELDS if from_index: movies = self.redis.mget(('inverse:index:movie:{}'.format(idx) for idx in movies)) response = [] for movie in movies: values = self.redis.hmget('movie:{}'.format(movie), fields) obj = dict(zip(fields, values)) if 'genres' in obj: obj['genres'] = obj['genres'].split(',') if 'year' in obj: obj['year'] = int(obj['year']) response.append(obj) return response def movie_to_index(self, movies): return self.redis.mget(('index:movie:{}'.format(m) for m in movies))
28.729167
94
0.57723
161
1,379
4.832298
0.403727
0.046272
0.051414
0
0
0
0
0
0
0
0
0
0.294416
1,379
47
95
29.340426
0.799589
0.092096
0
0.121212
0
0
0.073052
0.017857
0
0
0
0
0
1
0.151515
false
0
0.060606
0.030303
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a58853ac66bc8f5b8cfad78774a49e43b593fba
2,786
py
Python
src/mem/slicc/ast/TypeDeclAST.py
qianlong4526888/haha
01baf923693873c11ae072ce4dde3d8f1d7b6239
[ "BSD-3-Clause" ]
135
2016-10-21T03:31:49.000Z
2022-03-25T01:22:20.000Z
src/mem/slicc/ast/TypeDeclAST.py
qianlong4526888/haha
01baf923693873c11ae072ce4dde3d8f1d7b6239
[ "BSD-3-Clause" ]
148
2018-07-20T00:58:36.000Z
2021-11-16T01:52:33.000Z
src/mem/slicc/ast/TypeDeclAST.py
qianlong4526888/haha
01baf923693873c11ae072ce4dde3d8f1d7b6239
[ "BSD-3-Clause" ]
48
2016-12-08T12:03:13.000Z
2022-02-16T09:16:13.000Z
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood # Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from slicc.ast.DeclAST import DeclAST from slicc.symbols.Type import Type class TypeDeclAST(DeclAST): def __init__(self, slicc, type_ast, pairs, field_asts): super(TypeDeclAST, self).__init__(slicc, pairs) self.type_ast = type_ast self.field_asts = field_asts def __repr__(self): return "[TypeDecl: %r]" % (self.type_ast) def files(self, parent=None): if "external" in self: return set() if parent: ident = "%s_%s" % (parent, self.type_ast.ident) else: ident = self.type_ast.ident return set(("%s.hh" % ident, "%s.cc" % ident)) def generate(self): ident = str(self.type_ast) machine = self.symtab.state_machine # Make the new type new_type = Type(self.symtab, ident, self.location, self.pairs, self.state_machine) if machine: machine.addType(new_type) self.symtab.newSymbol(new_type) self.symtab.pushFrame() # Add all of the fields of the type to it for field in self.field_asts: field.generate(new_type) self.symtab.popFrame()
39.8
72
0.709261
387
2,786
5.028424
0.45478
0.02518
0.028263
0.026208
0.094553
0.069887
0.069887
0.069887
0.069887
0.069887
0
0.005533
0.221464
2,786
69
73
40.376812
0.891655
0.576813
0
0
0
0
0.032174
0
0
0
0
0
0
1
0.137931
false
0
0.068966
0.034483
0.344828
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5d2dc08b304db2757537f331d99b9fccf16fe7
3,064
py
Python
python/sysmap/graph.py
harryherold/sysmap
293e5f0dc22ed709c8fd5c170662e433c039eeab
[ "BSD-3-Clause" ]
1
2020-05-08T13:55:31.000Z
2020-05-08T13:55:31.000Z
python/sysmap/graph.py
harryherold/sysmap
293e5f0dc22ed709c8fd5c170662e433c039eeab
[ "BSD-3-Clause" ]
3
2020-01-16T10:30:28.000Z
2020-01-27T11:23:49.000Z
python/sysmap/graph.py
harryherold/sysmap
293e5f0dc22ed709c8fd5c170662e433c039eeab
[ "BSD-3-Clause" ]
1
2020-01-16T09:08:14.000Z
2020-01-16T09:08:14.000Z
from graphviz import Digraph from collections import namedtuple class NetworkGraph: ''' Representation of the network connections. This class contains the entities in the network e.g. hosts or switches. And the connections between them. ''' Vertex = namedtuple('Vertexes', ['hosts', 'switches']) _edges = [] def _sanitize_edge_connection(self, edge): ''' Update '_to' and '_form' field of a edge. :param edge: One edge connection. :type edge: dict :returns: Updated edge with _to and _from key. :rtype: dict ''' if edge['to_guid'].startswith('S'): to_collection = 'switches/' elif edge['to_guid'].startswith('H'): to_collection = 'hosts/' if edge['from_guid'].startswith('S'): from_collection = 'switches/' elif edge['from_guid'].startswith('H'): from_collection = 'hosts/' edge.update({ '_to': to_collection + edge['to_guid'], '_from': from_collection + edge['from_guid'] }) return edge def _sanitize_vertexes(self, vertex): ''' Update '_key' field of vertex to appropriate guid. :param vertex: Vertex :type vertex: dict :returns: An updated dict, '_key' field with 'guid' value. :rtype: dict ''' vertex.update({'_key': vertex['guid']}) return vertex def __init__(self, hsts=None, switches=None, connections=None): self._vertexes = self.Vertex(hosts=[self._sanitize_vertexes(h) for h in hsts], switches=[self._sanitize_vertexes(s) for s in switches]) self._edges = [self._sanitize_edge_connection(c) for c in connections] @property def vertexes(self): ''' Returns a concatenated list of all vertexes. :returns: List of vertexes, contains of hosts and switches. :rtype: List[dict] ''' return self._vertexes.hosts + self._vertexes.switches @property def switches(self): ''' Returns a list of all 'switch' vertexes. :returns: List of all switches. :rtype: List[dict] ''' return self._vertexes.switches @property def hosts(self): ''' Returns a list of all 'host' vertexes. :returns: List of all hosts. :rtype: List[dict] ''' return self._vertexes.hosts @property def edges(self): ''' Return a list of all 'connection' edges. :returns: List of all connections. :rtype: List[dict] ''' return self._edges def to_graph(self, graphargs): ''' Draw a dot graph of the network graph. :params graphargs: Arguments to graphviz.Digraph. :type graphargs: dict ''' graph = Digraph(**graphargs) for v in self._vertexes: graph.node(v['guid'], v['description']) for c in self._edges: graph.edge(c['from_guid'], c['to_guid']) graph.render()
29.461538
92
0.582572
351
3,064
4.94302
0.225071
0.027666
0.036311
0.043804
0.162536
0.092795
0.068588
0
0
0
0
0
0.304504
3,064
103
93
29.747573
0.814172
0.320496
0
0.088889
0
0
0.083102
0
0
0
0
0
0
1
0.177778
false
0
0.044444
0
0.422222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5d6681ce10a3af268cfdb475c6d9aff87499c6
1,211
py
Python
png/imageRecognition_Simple.py
tanthanadon/senior
89fc24889b34860982b551e5ea5e0d3550505f65
[ "MIT" ]
null
null
null
png/imageRecognition_Simple.py
tanthanadon/senior
89fc24889b34860982b551e5ea5e0d3550505f65
[ "MIT" ]
5
2020-03-04T13:49:10.000Z
2020-03-20T04:06:23.000Z
png/imageRecognition_Simple.py
tanthanadon/senior
89fc24889b34860982b551e5ea5e0d3550505f65
[ "MIT" ]
null
null
null
from math import sqrt from skimage import data from skimage.feature import blob_dog, blob_log, blob_doh from skimage.color import rgb2gray from skimage import io import matplotlib.pyplot as plt image = io.imread("star.jpg") image_gray = rgb2gray(image) blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1) # Compute radii in the 3rd column. blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2) blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1) blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2) blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01) blobs_list = [blobs_log, blobs_dog, blobs_doh] colors = ['yellow', 'lime', 'red'] titles = ['Laplacian of Gaussian', 'Difference of Gaussian', 'Determinant of Hessian'] sequence = zip(blobs_list, colors, titles) fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True) ax = axes.ravel() for idx, (blobs, color, title) in enumerate(sequence): ax[idx].set_title(title) ax[idx].imshow(image) for blob in blobs: y, x, r = blob c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False) ax[idx].add_patch(c) ax[idx].set_axis_off() plt.tight_layout() plt.show()
28.833333
74
0.696945
195
1,211
4.169231
0.435897
0.054121
0.04428
0.062731
0.092251
0.068881
0
0
0
0
0
0.025641
0.162675
1,211
42
75
28.833333
0.776134
0.026424
0
0
0
0
0.073005
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5e3b1295194140be07e7851df9a2e6e39cc960
529
py
Python
Day22_Pong/ball.py
syt1209/PythonProjects
0409dbd3c0b0ddf00debc38875059c828eb31dec
[ "MIT" ]
1
2021-02-16T00:59:29.000Z
2021-02-16T00:59:29.000Z
Day22_Pong/ball.py
syt1209/PythonProjects
0409dbd3c0b0ddf00debc38875059c828eb31dec
[ "MIT" ]
null
null
null
Day22_Pong/ball.py
syt1209/PythonProjects
0409dbd3c0b0ddf00debc38875059c828eb31dec
[ "MIT" ]
null
null
null
from turtle import Turtle SPEED = 10 class Ball(Turtle): def __init__(self): super().__init__() self.penup() self.color("white") self.shape("circle") self.move_speed = 0.1 self.y_bounce = 1 self.x_bounce = 1 def move(self): new_x = self.xcor() + SPEED*self.x_bounce new_y = self.ycor() + SPEED*self.y_bounce self.goto(new_x, new_y) def reset(self): self.goto(0, 0) self.move_speed = 0.1 self.x_bounce *= -1
20.346154
49
0.551985
76
529
3.592105
0.355263
0.054945
0.120879
0.102564
0.216117
0.139194
0
0
0
0
0
0.030641
0.321361
529
25
50
21.16
0.729805
0
0
0.105263
0
0
0.020833
0
0
0
0
0
0
1
0.157895
false
0
0.052632
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5ec6dd61aef0b828a5fdf8e68715be0262b256
103,584
py
Python
src/sage/modular/dirichlet.py
hsm207/sage
020bd59ec28717bfab9af44d2231c53da1ff99f1
[ "BSL-1.0" ]
1
2021-10-18T01:24:04.000Z
2021-10-18T01:24:04.000Z
src/sage/modular/dirichlet.py
hsm207/sage
020bd59ec28717bfab9af44d2231c53da1ff99f1
[ "BSL-1.0" ]
null
null
null
src/sage/modular/dirichlet.py
hsm207/sage
020bd59ec28717bfab9af44d2231c53da1ff99f1
[ "BSL-1.0" ]
null
null
null
# -*- coding: utf-8 -*- r""" Dirichlet characters A :class:`DirichletCharacter` is the extension of a homomorphism .. MATH:: (\ZZ/N\ZZ)^* \to R^*, for some ring `R`, to the map `\ZZ/N\ZZ \to R` obtained by sending those `x\in\ZZ/N\ZZ` with `\gcd(N,x)>1` to `0`. EXAMPLES:: sage: G = DirichletGroup(35) sage: x = G.gens() sage: e = x[0]*x[1]^2; e Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2 - 1 sage: e.order() 12 This illustrates a canonical coercion:: sage: e = DirichletGroup(5, QQ).0 sage: f = DirichletGroup(5,CyclotomicField(4)).0 sage: e*f Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4 AUTHORS: - William Stein (2005-09-02): Fixed bug in comparison of Dirichlet characters. It was checking that their values were the same, but not checking that they had the same level! - William Stein (2006-01-07): added more examples - William Stein (2006-05-21): added examples of everything; fix a *lot* of tiny bugs and design problem that became clear when creating examples. - Craig Citro (2008-02-16): speed up __call__ method for Dirichlet characters, miscellaneous fixes - Julian Rueth (2014-03-06): use UniqueFactory to cache DirichletGroups """ # **************************************************************************** # Copyright (C) 2004-2006 William Stein <[email protected]> # Copyright (C) 2014 Julian Rueth <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # https://www.gnu.org/licenses/ # **************************************************************************** from __future__ import print_function import sage.categories.all as cat from sage.misc.all import prod import sage.misc.prandom as random import sage.modules.free_module as free_module import sage.modules.free_module_element as free_module_element import sage.rings.all as rings import sage.rings.number_field.number_field as number_field from sage.libs.pari import pari from sage.categories.map import Map from sage.rings.rational_field import is_RationalField from sage.rings.complex_mpfr import is_ComplexField from sage.rings.qqbar import is_AlgebraicField from sage.rings.ring import is_Ring from sage.misc.functional import round from sage.misc.cachefunc import cached_method from sage.misc.fast_methods import WithEqualityById from sage.structure.element import MultiplicativeGroupElement from sage.structure.gens_py import multiplicative_iterator from sage.structure.parent import Parent from sage.structure.sequence import Sequence from sage.structure.factory import UniqueFactory from sage.structure.richcmp import richcmp from sage.arith.all import (binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation) def trivial_character(N, base_ring=rings.RationalField()): r""" Return the trivial character of the given modulus, with values in the given base ring. EXAMPLES:: sage: t = trivial_character(7) sage: [t(x) for x in [0..20]] [0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1] sage: t(1).parent() Rational Field sage: trivial_character(7, Integers(3))(1).parent() Ring of integers modulo 3 """ return DirichletGroup(N, base_ring)(1) TrivialCharacter = trivial_character def kronecker_character(d): """ Return the quadratic Dirichlet character (d/.) of minimal conductor. EXAMPLES:: sage: kronecker_character(97*389*997^2) Dirichlet character modulo 37733 of conductor 37733 mapping 1557 |--> -1, 37346 |--> -1 :: sage: a = kronecker_character(1) sage: b = DirichletGroup(2401,QQ)(a) # NOTE -- over QQ! sage: b.modulus() 2401 AUTHORS: - Jon Hanke (2006-08-06) """ d = rings.Integer(d) if d == 0: raise ValueError("d must be nonzero") D = fundamental_discriminant(d) G = DirichletGroup(abs(D), rings.RationalField()) return G([kronecker(D,u) for u in G.unit_gens()]) def kronecker_character_upside_down(d): """ Return the quadratic Dirichlet character (./d) of conductor d, for d0. EXAMPLES:: sage: kronecker_character_upside_down(97*389*997^2) Dirichlet character modulo 37506941597 of conductor 37733 mapping 13533432536 |--> -1, 22369178537 |--> -1, 14266017175 |--> 1 AUTHORS: - Jon Hanke (2006-08-06) """ d = rings.Integer(d) if d <= 0: raise ValueError("d must be positive") G = DirichletGroup(d, rings.RationalField()) return G([kronecker(u.lift(),d) for u in G.unit_gens()]) def is_DirichletCharacter(x): r""" Return True if x is of type DirichletCharacter. EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletCharacter sage: is_DirichletCharacter(trivial_character(3)) True sage: is_DirichletCharacter([1]) False """ return isinstance(x, DirichletCharacter) class DirichletCharacter(MultiplicativeGroupElement): """ A Dirichlet character. """ def __init__(self, parent, x, check=True): r""" Create a Dirichlet character with specified values on generators of `(\ZZ/n\ZZ)^*`. INPUT: - ``parent`` -- :class:`DirichletGroup`, a group of Dirichlet characters - ``x`` -- one of the following: - tuple or list of ring elements: the values of the Dirichlet character on the standard generators of `(\ZZ/N\ZZ)^*` as returned by :meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`. - vector over `\ZZ/e\ZZ`, where `e` is the order of the standard root of unity for ``parent``. In both cases, the orders of the elements must divide the orders of the respective generators of `(\ZZ/N\ZZ)^*`. OUTPUT: The Dirichlet character defined by `x` (type :class:`DirichletCharacter`). EXAMPLES:: sage: G.<e> = DirichletGroup(13) sage: G Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4 sage: e Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12 sage: loads(e.dumps()) == e True :: sage: G, x = DirichletGroup(35).objgens() sage: e = x[0]*x[1]; e Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2 sage: e.order() 12 sage: loads(e.dumps()) == e True TESTS:: sage: G = DirichletGroup(10) sage: TestSuite(G[1]).run() It is checked that the orders of the elements in `x` are admissible (see :trac:`17283`):: sage: k.<i> = CyclotomicField(4) sage: G = DirichletGroup(192) sage: G([i, -1, -1]) Traceback (most recent call last): ... ValueError: values (= (zeta16^4, -1, -1)) must have multiplicative orders dividing (2, 16, 2), respectively sage: from sage.modular.dirichlet import DirichletCharacter sage: M = FreeModule(Zmod(16), 3) sage: DirichletCharacter(G, M([4, 8, 8])) Traceback (most recent call last): ... ValueError: values (= (4, 8, 8) modulo 16) must have additive orders dividing (2, 16, 2), respectively """ MultiplicativeGroupElement.__init__(self, parent) if check: orders = parent.integers_mod().unit_group().gens_orders() if len(x) != len(orders): raise ValueError("wrong number of values (= {}) on generators (want {})".format(x, len(orders))) if free_module_element.is_FreeModuleElement(x): x = parent._module(x) if any(u * v for u, v in zip(x, orders)): raise ValueError("values (= {} modulo {}) must have additive orders dividing {}, respectively" .format(x, parent.zeta_order(), orders)) self.element.set_cache(x) else: R = parent.base_ring() x = tuple(map(R, x)) if R.is_exact() and any(u**v != 1 for u, v in zip(x, orders)): raise ValueError("values (= {}) must have multiplicative orders dividing {}, respectively" .format(x, orders)) self.values_on_gens.set_cache(x) else: if free_module_element.is_FreeModuleElement(x): self.element.set_cache(x) else: self.values_on_gens.set_cache(x) @cached_method def __eval_at_minus_one(self): r""" Efficiently evaluate the character at -1 using knowledge of its order. This is potentially much more efficient than computing the value of -1 directly using dlog and a large power of the image root of unity. We use the following. Proposition: Suppose eps is a character mod `p^n`, where `p` is a prime. Then `\varepsilon(-1) = -1` if and only if `p = 2` and the factor of eps at 4 is nontrivial or `p > 2` and 2 does not divide `\phi(p^n)/\mbox{\rm ord}(\varepsilon)`. EXAMPLES:: sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one() -1 """ D = self.decomposition() val = self.base_ring()(1) for e in D: if e.modulus() % 2 == 0: if e.modulus() % 4 == 0: val *= e.values_on_gens()[0] # first gen is -1 for 2-power modulus elif (euler_phi(e.parent().modulus()) / e.order()) % 2: val *= -1 return val def __call__(self, m): """ Return the value of this character at the integer `m`. .. warning:: A table of values of the character is made the first time you call this (unless `m` equals -1) EXAMPLES:: sage: G = DirichletGroup(60) sage: e = prod(G.gens(), G(1)) sage: e Dirichlet character modulo 60 of conductor 60 mapping 31 |--> -1, 41 |--> -1, 37 |--> zeta4 sage: e(-1) -1 sage: e(2) 0 sage: e(7) -zeta4 sage: Integers(60).unit_gens() (31, 41, 37) sage: e(31) -1 sage: e(41) -1 sage: e(37) zeta4 sage: e(31*37) -zeta4 sage: parent(e(31*37)) Cyclotomic Field of order 4 and degree 2 """ N = self.modulus() m = m % N if self.values.is_in_cache() or m != N - 1: return self.values()[m] else: return self.__eval_at_minus_one() def change_ring(self, R): """ Return the base extension of ``self`` to ``R``. INPUT: - ``R`` -- either a ring admitting a conversion map from the base ring of ``self``, or a ring homomorphism with the base ring of ``self`` as its domain EXAMPLES:: sage: e = DirichletGroup(7, QQ).0 sage: f = e.change_ring(QuadraticField(3, 'a')) sage: f.parent() Group of Dirichlet characters modulo 7 with values in Number Field in a with defining polynomial x^2 - 3 with a = 1.732050807568878? :: sage: e = DirichletGroup(13).0 sage: e.change_ring(QQ) Traceback (most recent call last): ... TypeError: Unable to coerce zeta12 to a rational We test the case where `R` is a map (:trac:`18072`):: sage: K.<i> = QuadraticField(-1) sage: chi = DirichletGroup(5, K)[1] sage: chi(2) i sage: f = K.complex_embeddings()[0] sage: psi = chi.change_ring(f) sage: psi(2) -1.83697019872103e-16 - 1.00000000000000*I """ if self.base_ring() is R: return self G = self.parent().change_ring(R) return G.element_class(G, [R(x) for x in self.values_on_gens()]) def _richcmp_(self, other, op): """ Compare ``self`` to ``other``. .. NOTE:: Since there is no coercion between Dirichlet groups of different moduli, characters of different moduli compare as unequal, even if they define identical functions on ``ZZ``. EXAMPLES:: sage: e = DirichletGroup(16)([-1, 1]) sage: f = e.restrict(8) sage: e == e True sage: f == f True sage: e == f False sage: k = DirichletGroup(7)([-1]) sage: k == e False """ return richcmp(self.values_on_gens(), other.values_on_gens(), op) def __hash__(self): """ Return the hash of ``self``. EXAMPLES:: sage: e = DirichletGroup(16)([-1, 1]) sage: hash(e) == hash((-1,1)) True """ return hash(self.values_on_gens()) def __invert__(self): """ Return the multiplicative inverse of self. EXAMPLES:: sage: e = DirichletGroup(13).0 sage: f = ~e sage: f*e Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1 """ G = self.parent() if G.zeta.is_in_cache(): x = -self.element() else: x = tuple(~z for z in self.values_on_gens()) return G.element_class(G, x, check=False) def _mul_(self, other): """ Return the product of self and other. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1 sage: b Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4 sage: a*b # indirect doctest Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> zeta4 Multiplying elements whose parents have different zeta orders works:: sage: a = DirichletGroup(3, QQ, zeta=1, zeta_order=1)(1) sage: b = DirichletGroup(3, QQ, zeta=-1, zeta_order=2)([-1]) sage: a * b # indirect doctest Dirichlet character modulo 3 of conductor 3 mapping 2 |--> -1 """ G = self.parent() if G.zeta.is_in_cache(): x = self.element() + other.element() else: x = tuple(y * z for y, z in zip(self.values_on_gens(), other.values_on_gens())) return G.element_class(G, x, check=False) def __copy__(self): """ Return a (shallow) copy of this Dirichlet character. EXAMPLES:: sage: G.<a> = DirichletGroup(11) sage: b = copy(a) sage: a is b False sage: a.element() is b.element() False sage: a.values_on_gens() is b.values_on_gens() True """ # This method exists solely because of a bug in the cPickle module -- # see modsym/manin_symbols.py. G = self.parent() return G.element_class(G, self.values_on_gens(), check=False) def __pow__(self, n): """ Return self raised to the power of n EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a^2 Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1 sage: b^2 Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1 """ G = self.parent() if G.zeta.is_in_cache(): x = n * self.element() else: x = tuple(z**n for z in self.values_on_gens()) return G.element_class(G, x, check=False) def _repr_short_(self): r""" A short string representation of self, often used in string representations of modular forms EXAMPLES:: sage: chi = DirichletGroup(24).0 sage: chi._repr_short_() '[-1, 1, 1]' """ return str(list(self.values_on_gens())) def _repr_(self): """ String representation of self. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: repr(a) # indirect doctest 'Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1' TESTS: Dirichlet characters modulo 1 and 2 are printed correctly (see :trac:`17338`):: sage: DirichletGroup(1)[0] Dirichlet character modulo 1 of conductor 1 sage: DirichletGroup(2)[0] Dirichlet character modulo 2 of conductor 1 """ s = 'Dirichlet character modulo %s of conductor %s' % (self.modulus(), self.conductor()) r = len(self.values_on_gens()) if r != 0: s += ' mapping ' for i in range(r): if i != 0: s += ', ' s += str(self.parent().unit_gens()[i]) + ' |--> ' + str(self.values_on_gens()[i]) return s def _latex_(self): r""" LaTeX representation of self. EXAMPLES:: sage: G.<a,b> = DirichletGroup(16) sage: latex(b) # indirect doctest \hbox{Dirichlet character modulo } 16 \hbox{ of conductor } 16 \hbox{ mapping } 15 \mapsto 1,\ 5 \mapsto \zeta_{4} TESTS: Dirichlet characters modulo 1 and 2 are printed correctly (see :trac:`17338`):: sage: latex(DirichletGroup(1)[0]) \hbox{Dirichlet character modulo } 1 \hbox{ of conductor } 1 sage: latex(DirichletGroup(2)[0]) \hbox{Dirichlet character modulo } 2 \hbox{ of conductor } 1 """ s = r'\hbox{Dirichlet character modulo } %s \hbox{ of conductor } %s' % (self.modulus(), self.conductor()) r = len(self.values_on_gens()) if r != 0: s += r' \hbox{ mapping } ' for i in range(r): if i != 0: s += r',\ ' s += self.parent().unit_gens()[i]._latex_() + r' \mapsto ' + self.values_on_gens()[i]._latex_() return s def base_ring(self): """ Returns the base ring of this Dirichlet character. EXAMPLES:: sage: G = DirichletGroup(11) sage: G.gen(0).base_ring() Cyclotomic Field of order 10 and degree 4 sage: G = DirichletGroup(11, RationalField()) sage: G.gen(0).base_ring() Rational Field """ return self.parent().base_ring() def bar(self): """ Return the complex conjugate of this Dirichlet character. EXAMPLES:: sage: e = DirichletGroup(5).0 sage: e Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4 sage: e.bar() Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4 """ return ~self def bernoulli(self, k, algorithm='recurrence', cache=True, **opts): r""" Returns the generalized Bernoulli number `B_{k,eps}`. INPUT: - ``k`` -- a non-negative integer - ``algorithm`` -- either ``'recurrence'`` (default) or ``'definition'`` - ``cache`` -- if True, cache answers - ``**opts`` -- optional arguments; not used directly, but passed to the :func:`bernoulli` function if this is called OUTPUT: Let `\varepsilon` be a (not necessarily primitive) character of modulus `N`. This function returns the generalized Bernoulli number `B_{k,\varepsilon}`, as defined by the following identity of power series (see for example [DI1995]_, Section 2.2): .. MATH:: \sum_{a=1}^N \frac{\varepsilon(a) t e^{at}}{e^{Nt}-1} = sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!} t^k. ALGORITHM: The ``'recurrence'`` algorithm computes generalized Bernoulli numbers via classical Bernoulli numbers using the formula in [Coh2007]_, Proposition 9.4.5; this is usually optimal. The ``definition`` algorithm uses the definition directly. .. WARNING:: In the case of the trivial Dirichlet character modulo 1, this function returns `B_{1,\varepsilon} = 1/2`, in accordance with the above definition, but in contrast to the value `B_1 = -1/2` for the classical Bernoulli number. Some authors use an alternative definition giving `B_{1,\varepsilon} = -1/2`; see the discussion in [Coh2007]_, Section 9.4.1. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = G.0 sage: e.bernoulli(5) 7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13 sage: eps = DirichletGroup(9).0 sage: eps.bernoulli(3) 10*zeta6 + 4 sage: eps.bernoulli(3, algorithm="definition") 10*zeta6 + 4 TESTS: Check that :trac:`17586` is fixed:: sage: DirichletGroup(1)[0].bernoulli(1) 1/2 """ if cache: try: self.__bernoulli except AttributeError: self.__bernoulli = {} if k in self.__bernoulli: return self.__bernoulli[k] N = self.modulus() K = self.base_ring() if N == 1: # By definition, the first Bernoulli number of the trivial # character is 1/2, in contrast to the value B_1 = -1/2. ber = K.one()/2 if k == 1 else K(bernoulli(k)) elif self(-1) != K((-1)**k): ber = K.zero() elif algorithm == "recurrence": # The following code is pretty fast, at least compared to # the other algorithm below. That said, I'm sure it could # be sped up by a factor of 10 or more in many cases, # especially since we end up computing all the Bernoulli # numbers up to k, which should be done with power series # instead of calls to the Bernoulli function. Likewise # computing all binomial coefficients can be done much # more efficiently. v = self.values() S = lambda n: sum(v[r] * r**n for r in range(1, N)) ber = K(sum(binomial(k,j) * bernoulli(j, **opts) * N**(j-1) * S(k-j) for j in range(k+1))) elif algorithm == "definition": # This is better since it computes the same thing, but requires # no arith in a poly ring over a number field. prec = k+2 R = rings.PowerSeriesRing(rings.QQ, 't') t = R.gen() # g(t) = t/(e^{Nt}-1) g = t/((N*t).exp(prec) - 1) # h(n) = g(t)*e^{nt} h = [0] + [g * ((n*t).exp(prec)) for n in range(1,N+1)] ber = sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k) else: raise ValueError("algorithm = '%s' unknown"%algorithm) if cache: self.__bernoulli[k] = ber return ber def lfunction(self, prec=53, algorithm='pari'): """ Return the L-function of ``self``. The result is a wrapper around a PARI L-function or around the ``lcalc`` program. INPUT: - ``prec`` -- precision (default 53) - ``algorithm`` -- 'pari' (default) or 'lcalc' EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: L = a.lfunction(); L PARI L-function associated to Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1 sage: L(4) 0.988944551741105 With the algorithm "lcalc":: sage: a = a.primitive_character() sage: L = a.lfunction(algorithm='lcalc'); L L-function with complex Dirichlet coefficients sage: L.value(4) # abs tol 1e-14 0.988944551741105 - 5.16608739123418e-18*I """ if algorithm is None: algorithm = 'pari' if algorithm == 'pari': from sage.lfunctions.pari import lfun_character, LFunction Z = LFunction(lfun_character(self), prec=prec) Z.rename('PARI L-function associated to %s' % self) return Z elif algorithm == 'lcalc': from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character return Lfunction_from_character(self) raise ValueError('algorithm must be "pari" or "lcalc"') @cached_method def conductor(self): """ Computes and returns the conductor of this character. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.conductor() 4 sage: b.conductor() 5 sage: (a*b).conductor() 20 TESTS:: sage: G.<a, b> = DirichletGroup(20) sage: type(G(1).conductor()) <type 'sage.rings.integer.Integer'> """ if self.modulus() == 1 or self.is_trivial(): return rings.Integer(1) F = factor(self.modulus()) if len(F) > 1: return prod([d.conductor() for d in self.decomposition()]) p = F[0][0] # When p is odd, and x =/= 1, the conductor is the smallest p**r such that # Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1). # For a given r, whether or not the above divisibility holds # depends only on the factor of p**(r-1) on the right hand side. # Since p-1 is coprime to p, this smallest r such that the # divisibility holds equals Valuation(Order(x),p)+1. cond = p**(valuation(self.order(),p) + 1) if p == 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1: cond *= 2 return rings.Integer(cond) @cached_method def decomposition(self): r""" Return the decomposition of self as a product of Dirichlet characters of prime power modulus, where the prime powers exactly divide the modulus of this character. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: c = a*b sage: d = c.decomposition(); d [Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4] sage: d[0].parent() Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2 sage: d[1].parent() Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2 We can't multiply directly, since coercion of one element into the other parent fails in both cases:: sage: d[0]*d[1] == c Traceback (most recent call last): ... TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2' We can multiply if we're explicit about where we want the multiplication to take place. :: sage: G(d[0])*G(d[1]) == c True Conductors that are divisible by various powers of 2 present some problems as the multiplicative group modulo `2^k` is trivial for `k = 1` and non-cyclic for `k \ge 3`:: sage: (DirichletGroup(18).0).decomposition() [Dirichlet character modulo 2 of conductor 1, Dirichlet character modulo 9 of conductor 9 mapping 2 |--> zeta6] sage: (DirichletGroup(36).0).decomposition() [Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1] sage: (DirichletGroup(72).0).decomposition() [Dirichlet character modulo 8 of conductor 4 mapping 7 |--> -1, 5 |--> 1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1] """ D = self.parent().decomposition() vals = [[z] for z in self.values_on_gens()] if self.modulus() % 8 == 0: # 2 factors at 2. vals[0].append(vals[1][0]) del vals[1] elif self.modulus() % 4 == 2: # 0 factors at 2. vals = [1] + vals return [D[i](vals[i]) for i in range(len(D))] def extend(self, M): """ Returns the extension of this character to a Dirichlet character modulo the multiple M of the modulus. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: H.<c> = DirichletGroup(4) sage: c.extend(20) Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1 sage: a Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1 sage: c.extend(20) == a True """ if M % self.modulus() != 0: raise ArithmeticError("M(=%s) must be a multiple of the modulus(=%s)"%(M,self.modulus())) H = DirichletGroup(M, self.base_ring()) return H(self) def _pari_conversion(self): r""" Prepare data for the conversion of the character to Pari. OUTPUT: pair (G, v) where G is `(\ZZ / N \ZZ)^*` where `N` is the modulus EXAMPLES:: sage: chi4 = DirichletGroup(4).gen() sage: chi4._pari_conversion() ([[4, [0]], [2, [2], [3]], [[2]~, Vecsmall([2])], [[4], [[1, matrix(0,2)]], Mat(1), [3], [2], [0]], Mat(1)], [1]) sage: chi = DirichletGroup(24)([1,-1,-1]); chi Dirichlet character modulo 24 of conductor 24 mapping 7 |--> 1, 13 |--> -1, 17 |--> -1 sage: chi._pari_conversion() ([[24, [0]], [8, [2, 2, 2], [7, 13, 17]], [[2, 2, 3]~, Vecsmall([3, 3, 1])], [[8, 8, 3], [[1, matrix(0,2)], [1, matrix(0,2)], [2, Mat([2, 1])]], [1, 0, 0; 0, 1, 0; 0, 0, 1], [7, 13, 17], [2, 2, 2], [0, 0, 0]], [1, 0, 0; 0, 1, 0; 0, 0, 1]], [0, 1, 1]) """ G = pari.znstar(self.modulus(), 1) pari_orders = G[1][1] pari_gens = G[1][2] # one should use the following, but this does not work # pari_orders = G.cyc() # pari_gens = G.gen() values_on_gens = (self(x) for x in pari_gens) # now compute the input for pari (list of exponents) P = self.parent() if is_ComplexField(P.base_ring()): zeta = P.zeta() zeta_argument = zeta.argument() v = [int(x.argument() / zeta_argument) for x in values_on_gens] else: dlog = P._zeta_dlog v = [dlog[x] for x in values_on_gens] m = P.zeta_order() v = [(vi * oi) // m for vi, oi in zip(v, pari_orders)] return (G, v) def conrey_number(self): r""" Return the Conrey number for this character. This is a positive integer coprime to q that identifies a Dirichlet character of modulus q. See https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey EXAMPLES:: sage: chi4 = DirichletGroup(4).gen() sage: chi4.conrey_number() 3 sage: chi = DirichletGroup(24)([1,-1,-1]); chi Dirichlet character modulo 24 of conductor 24 mapping 7 |--> 1, 13 |--> -1, 17 |--> -1 sage: chi.conrey_number() 5 sage: chi = DirichletGroup(60)([1,-1,I]) sage: chi.conrey_number() 17 sage: chi = DirichletGroup(420)([1,-1,-I,1]) sage: chi.conrey_number() 113 TESTS:: sage: eps1 = DirichletGroup(5)([-1]) sage: eps2 = DirichletGroup(5,QQ)([-1]) sage: eps1.conrey_number() == eps2.conrey_number() True """ G, v = self._pari_conversion() return pari.znconreyexp(G, v).sage() def lmfdb_page(self): r""" Open the LMFDB web page of the character in a browser. See https://www.lmfdb.org EXAMPLES:: sage: E = DirichletGroup(4).gen() sage: E.lmfdb_page() # optional -- webbrowser """ import webbrowser lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}' url = lmfdb_url.format(self.modulus(), self.conrey_number()) webbrowser.open(url) def galois_orbit(self, sort=True): r""" Return the orbit of this character under the action of the absolute Galois group of the prime subfield of the base ring. EXAMPLES:: sage: G = DirichletGroup(30); e = G.1 sage: e.galois_orbit() [Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> -zeta4, Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> zeta4] Another example:: sage: G = DirichletGroup(13) sage: G.galois_orbits() [ [Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1], ..., [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1] ] sage: e = G.0 sage: e Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12 sage: e.galois_orbit() [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^3 + zeta12, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^3 - zeta12, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12] sage: e = G.0^2; e Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2 sage: e.galois_orbit() [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1] A non-example:: sage: chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0 sage: chi.galois_orbit() Traceback (most recent call last): ... TypeError: Galois orbits only defined if base ring is an integral domain """ if not self.base_ring().is_integral_domain(): raise TypeError("Galois orbits only defined if base ring is an integral domain") k = self.order() if k <= 2: return [self] P = self.parent() z = self.element() o = int(z.additive_order()) Auts = set([m % o for m in P._automorphisms()]) v = [P.element_class(P, m * z, check=False) for m in Auts] if sort: v.sort() return v def gauss_sum(self, a=1): r""" Return a Gauss sum associated to this Dirichlet character. The Gauss sum associated to `\chi` is .. MATH:: g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar}, where `m` is the modulus of `\chi` and `\zeta` is a primitive `m^{th}` root of unity. FACTS: If the modulus is a prime `p` and the character is nontrivial, then the Gauss sum has absolute value `\sqrt{p}`. CACHING: Computed Gauss sums are *not* cached with this character. EXAMPLES:: sage: G = DirichletGroup(3) sage: e = G([-1]) sage: e.gauss_sum(1) 2*zeta6 - 1 sage: e.gauss_sum(2) -2*zeta6 + 1 sage: norm(e.gauss_sum()) 3 :: sage: G = DirichletGroup(13) sage: e = G.0 sage: e.gauss_sum() -zeta156^46 + zeta156^45 + zeta156^42 + zeta156^41 + 2*zeta156^40 + zeta156^37 - zeta156^36 - zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28 - zeta156^24 - zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 + zeta156^18 - zeta156^16 - zeta156^15 - 2*zeta156^14 - zeta156^10 + zeta156^8 + zeta156^7 + zeta156^6 + zeta156^5 - zeta156^4 - zeta156^2 - 1 sage: factor(norm(e.gauss_sum())) 13^24 TESTS: The field of algebraic numbers is supported (:trac:`19056`):: sage: G = DirichletGroup(7, QQbar) sage: G[1].gauss_sum() -2.440133358345538? + 1.022618791871794?*I Check that :trac:`19060` is fixed:: sage: K.<z> = CyclotomicField(8) sage: G = DirichletGroup(13, K) sage: chi = G([z^2]) sage: chi.gauss_sum() zeta52^22 + zeta52^21 + zeta52^19 - zeta52^16 + zeta52^15 + zeta52^14 + zeta52^12 - zeta52^11 - zeta52^10 - zeta52^7 - zeta52^5 + zeta52^4 Check that :trac:`25127` is fixed:: sage: G = DirichletGroup(1) sage: chi = G.one() sage: chi.gauss_sum() 1 .. SEEALSO:: - :func:`sage.arith.misc.gauss_sum` for general finite fields - :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version """ G = self.parent() K = G.base_ring() chi = self m = G.modulus() if is_ComplexField(K): return self.gauss_sum_numerical(a=a) elif is_AlgebraicField(K): L = K zeta = L.zeta(m) elif number_field.is_CyclotomicField(K) or is_RationalField(K): chi = chi.minimize_base_ring() n = lcm(m, G.zeta_order()) L = rings.CyclotomicField(n) zeta = L.gen(0) ** (n // m) else: raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field") zeta = zeta ** a g = L(chi(0)) z = L.one() for c in chi.values()[1:]: z *= zeta g += L(c)*z return g def gauss_sum_numerical(self, prec=53, a=1): r""" Return a Gauss sum associated to this Dirichlet character as an approximate complex number with prec bits of precision. INPUT: - ``prec`` -- integer (default: 53), *bits* of precision - ``a`` -- integer, as for :meth:`gauss_sum`. The Gauss sum associated to `\chi` is .. MATH:: g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar}, where `m` is the modulus of `\chi` and `\zeta` is a primitive `m^{th}` root of unity. EXAMPLES:: sage: G = DirichletGroup(3) sage: e = G.0 sage: abs(e.gauss_sum_numerical()) 1.7320508075... sage: sqrt(3.0) 1.73205080756888 sage: e.gauss_sum_numerical(a=2) -...e-15 - 1.7320508075...*I sage: e.gauss_sum_numerical(a=2, prec=100) 4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I sage: G = DirichletGroup(13) sage: H = DirichletGroup(13, CC) sage: e = G.0 sage: f = H.0 sage: e.gauss_sum_numerical() -3.07497205... + 1.8826966926...*I sage: f.gauss_sum_numerical() -3.07497205... + 1.8826966926...*I sage: abs(e.gauss_sum_numerical()) 3.60555127546... sage: abs(f.gauss_sum_numerical()) 3.60555127546... sage: sqrt(13.0) 3.60555127546399 TESTS: The field of algebraic numbers is supported (:trac:`19056`):: sage: G = DirichletGroup(7, QQbar) sage: G[1].gauss_sum_numerical() -2.44013335834554 + 1.02261879187179*I """ G = self.parent() K = G.base_ring() if is_ComplexField(K): phi = lambda t : t CC = K elif is_AlgebraicField(K): from sage.rings.complex_mpfr import ComplexField CC = ComplexField(prec) phi = CC.coerce_map_from(K) elif number_field.is_CyclotomicField(K) or is_RationalField(K): phi = K.complex_embedding(prec) CC = phi.codomain() else: raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field") zeta = CC.zeta(G.modulus()) ** a g = phi(self(0)) z = CC.one() for c in self.values()[1:]: z *= zeta g += phi(c)*z return g def jacobi_sum(self, char, check=True): r""" Return the Jacobi sum associated to these Dirichlet characters (i.e., J(self,char)). This is defined as .. MATH:: J(\chi, \psi) = \sum_{a \in \ZZ / N\ZZ} \chi(a) \psi(1-a) where `\chi` and `\psi` are both characters modulo `N`. EXAMPLES:: sage: D = DirichletGroup(13) sage: e = D.0 sage: f = D[-2] sage: e.jacobi_sum(f) 3*zeta12^2 + 2*zeta12 - 3 sage: f.jacobi_sum(e) 3*zeta12^2 + 2*zeta12 - 3 sage: p = 7 sage: DP = DirichletGroup(p) sage: f = DP.0 sage: e.jacobi_sum(f) Traceback (most recent call last): ... NotImplementedError: Characters must be from the same Dirichlet Group. sage: all_jacobi_sums = [(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j])) ....: for i in range(p-1) for j in range(i, p-1)] sage: for s in all_jacobi_sums: ....: print(s) ((1,), (1,), 5) ((1,), (zeta6,), -1) ((1,), (zeta6 - 1,), -1) ((1,), (-1,), -1) ((1,), (-zeta6,), -1) ((1,), (-zeta6 + 1,), -1) ((zeta6,), (zeta6,), -zeta6 + 3) ((zeta6,), (zeta6 - 1,), 2*zeta6 + 1) ((zeta6,), (-1,), -2*zeta6 - 1) ((zeta6,), (-zeta6,), zeta6 - 3) ((zeta6,), (-zeta6 + 1,), 1) ((zeta6 - 1,), (zeta6 - 1,), -3*zeta6 + 2) ((zeta6 - 1,), (-1,), 2*zeta6 + 1) ((zeta6 - 1,), (-zeta6,), -1) ((zeta6 - 1,), (-zeta6 + 1,), -zeta6 - 2) ((-1,), (-1,), 1) ((-1,), (-zeta6,), -2*zeta6 + 3) ((-1,), (-zeta6 + 1,), 2*zeta6 - 3) ((-zeta6,), (-zeta6,), 3*zeta6 - 1) ((-zeta6,), (-zeta6 + 1,), -2*zeta6 + 3) ((-zeta6 + 1,), (-zeta6 + 1,), zeta6 + 2) Let's check that trivial sums are being calculated correctly:: sage: N = 13 sage: D = DirichletGroup(N) sage: g = D(1) sage: g.jacobi_sum(g) 11 sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)]) 11 And sums where exactly one character is nontrivial (see :trac:`6393`):: sage: G = DirichletGroup(5); X=G.list(); Y=X[0]; Z=X[1] sage: Y.jacobi_sum(Z) -1 sage: Z.jacobi_sum(Y) -1 Now let's take a look at a non-prime modulus:: sage: N = 9 sage: D = DirichletGroup(N) sage: g = D(1) sage: g.jacobi_sum(g) 3 We consider a sum with values in a finite field:: sage: g = DirichletGroup(17, GF(9,'a')).0 sage: g.jacobi_sum(g**2) 2*a TESTS: This shows that :trac:`6393` has been fixed:: sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1] sage: # Y is trivial and Z is quartic sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)]) -1 sage: # The value -1 above is the correct value of the Jacobi sum J(Y, Z). sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y) -1 -1 """ if check: if self.parent() != char.parent(): raise NotImplementedError("Characters must be from the same Dirichlet Group.") return sum([self(x) * char(1-x) for x in rings.IntegerModRing(self.modulus())]) def kloosterman_sum(self, a=1, b=0): r""" Return the "twisted" Kloosterman sum associated to this Dirichlet character. This includes Gauss sums, classical Kloosterman sums, Salié sums, etc. The Kloosterman sum associated to `\chi` and the integers a,b is .. MATH:: K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar+br^{-1}}, where `m` is the modulus of `\chi` and `\zeta` is a primitive `m` th root of unity. This reduces to the Gauss sum if `b=0`. This method performs an exact calculation and returns an element of a suitable cyclotomic field; see also :meth:`.kloosterman_sum_numerical`, which gives an inexact answer (but is generally much quicker). CACHING: Computed Kloosterman sums are *not* cached with this character. EXAMPLES:: sage: G = DirichletGroup(3) sage: e = G([-1]) sage: e.kloosterman_sum(3,5) -2*zeta6 + 1 sage: G = DirichletGroup(20) sage: e = G([1 for u in G.unit_gens()]) sage: e.kloosterman_sum(7,17) -2*zeta20^6 + 2*zeta20^4 + 4 TESTS:: sage: G = DirichletGroup(20, UniversalCyclotomicField()) sage: e = G([1 for u in G.unit_gens()]) sage: e.kloosterman_sum(7,17) -2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4 sage: G = DirichletGroup(12, QQbar) sage: e = G.gens()[0] sage: e.kloosterman_sum(5,11) Traceback (most recent call last): ... NotImplementedError: Kloosterman sums not implemented over this ring """ G = self.parent() zo = G.zeta_order() m = G.modulus() g = 0 L = rings.CyclotomicField(m.lcm(zo)) zeta = L.gen(0) try: self(1) * zeta**(a+b) except TypeError: raise NotImplementedError('Kloosterman sums not implemented ' 'over this ring') n = zeta.multiplicative_order() zeta = zeta**(n // m) for c in m.coprime_integers(m): e = rings.Mod(c, m) g += self(c) * zeta**int(a*e + b*e**(-1)) return g def kloosterman_sum_numerical(self, prec=53, a=1, b=0): r""" Return the Kloosterman sum associated to this Dirichlet character as an approximate complex number with prec bits of precision. See also :meth:`.kloosterman_sum`, which calculates the sum exactly (which is generally slower). INPUT: - ``prec`` -- integer (default: 53), *bits* of precision - ``a`` -- integer, as for :meth:`.kloosterman_sum` - ``b`` -- integer, as for :meth:`.kloosterman_sum`. EXAMPLES:: sage: G = DirichletGroup(3) sage: e = G.0 The real component of the numerical value of e is near zero:: sage: v=e.kloosterman_sum_numerical() sage: v.real() < 1.0e15 True sage: v.imag() 1.73205080756888 sage: G = DirichletGroup(20) sage: e = G.1 sage: e.kloosterman_sum_numerical(53,3,11) 3.80422606518061 - 3.80422606518061*I """ G = self.parent() K = G.base_ring() if not (number_field.is_CyclotomicField(K) or is_RationalField(K)): raise NotImplementedError("Kloosterman sums only currently implemented when the base ring is a cyclotomic field or QQ.") phi = K.complex_embedding(prec) CC = phi.codomain() g = 0 m = G.modulus() zeta = CC.zeta(m) for c in m.coprime_integers(m): e = rings.Mod(c, m) z = zeta ** int(a*e + b*(e**(-1))) g += phi(self(c))*z return g @cached_method def is_even(self): r""" Return ``True`` if and only if `\varepsilon(-1) = 1`. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = G.0 sage: e.is_even() False sage: e(-1) -1 sage: [e.is_even() for e in G] [True, False, True, False, True, False, True, False, True, False, True, False] sage: G = DirichletGroup(13, CC) sage: e = G.0 sage: e.is_even() False sage: e(-1) -1.000000... sage: [e.is_even() for e in G] [True, False, True, False, True, False, True, False, True, False, True, False] sage: G = DirichletGroup(100000, CC) sage: G.1.is_even() True Note that ``is_even`` need not be the negation of is_odd, e.g., in characteristic 2:: sage: G.<e> = DirichletGroup(13, GF(4,'a')) sage: e.is_even() True sage: e.is_odd() True """ R = self.base_ring() # self(-1) is either +1 or -1 if not R.is_exact(): return abs(self(-1) - R(1)) < 0.5 return self(-1) == R(1) @cached_method def is_odd(self): r""" Return ``True`` if and only if `\varepsilon(-1) = -1`. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = G.0 sage: e.is_odd() True sage: [e.is_odd() for e in G] [False, True, False, True, False, True, False, True, False, True, False, True] sage: G = DirichletGroup(13) sage: e = G.0 sage: e.is_odd() True sage: [e.is_odd() for e in G] [False, True, False, True, False, True, False, True, False, True, False, True] sage: G = DirichletGroup(100000, CC) sage: G.0.is_odd() True Note that ``is_even`` need not be the negation of is_odd, e.g., in characteristic 2:: sage: G.<e> = DirichletGroup(13, GF(4,'a')) sage: e.is_even() True sage: e.is_odd() True """ R = self.base_ring() # self(-1) is either +1 or -1 if not R.is_exact(): return abs(self(-1) - R(-1)) < 0.5 return self(-1) == R(-1) @cached_method def is_primitive(self): """ Return ``True`` if and only if this character is primitive, i.e., its conductor equals its modulus. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.is_primitive() False sage: b.is_primitive() False sage: (a*b).is_primitive() True sage: G.<a,b> = DirichletGroup(20, CC) sage: a.is_primitive() False sage: b.is_primitive() False sage: (a*b).is_primitive() True """ return (self.conductor() == self.modulus()) @cached_method def is_trivial(self): r""" Returns ``True`` if this is the trivial character, i.e., has order 1. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.is_trivial() False sage: (a^2).is_trivial() True """ if self.element.is_in_cache(): return not self.element() one = self.base_ring().one() return all(x == one for x in self.values_on_gens()) def kernel(self): r""" Return the kernel of this character. OUTPUT: Currently the kernel is returned as a list. This may change. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.kernel() [1, 9, 13, 17] sage: b.kernel() [1, 11] """ one = self.base_ring().one() return [x for x in range(self.modulus()) if self(x) == one] def maximize_base_ring(self): r""" Let .. MATH:: \varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n) be a Dirichlet character. This function returns an equal Dirichlet character .. MATH:: \chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_m) where `m` is the least common multiple of `n` and the exponent of `(\ZZ/N\ZZ)^*`. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20,QQ) sage: b.maximize_base_ring() Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1 sage: b.maximize_base_ring().base_ring() Cyclotomic Field of order 4 and degree 2 sage: DirichletGroup(20).base_ring() Cyclotomic Field of order 4 and degree 2 """ g = rings.IntegerModRing(self.modulus()).unit_group_exponent() if g == 1: g = 2 z = self.base_ring().zeta() n = z.multiplicative_order() m = lcm(g,n) if n == m: return self K = rings.CyclotomicField(m) return self.change_ring(K) def minimize_base_ring(self): r""" Return a Dirichlet character that equals this one, but over as small a subfield (or subring) of the base ring as possible. .. note:: This function is currently only implemented when the base ring is a number field. It's the identity function in characteristic p. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = DirichletGroup(13).0 sage: e.base_ring() Cyclotomic Field of order 12 and degree 4 sage: e.minimize_base_ring().base_ring() Cyclotomic Field of order 12 and degree 4 sage: (e^2).minimize_base_ring().base_ring() Cyclotomic Field of order 6 and degree 2 sage: (e^3).minimize_base_ring().base_ring() Cyclotomic Field of order 4 and degree 2 sage: (e^12).minimize_base_ring().base_ring() Rational Field TESTS: Check that :trac:`18479` is fixed:: sage: f = Newforms(Gamma1(25), names='a')[1] sage: eps = f.character() sage: eps.minimize_base_ring() == eps True A related bug (see :trac:`18086`):: sage: K.<a,b>=NumberField([x^2 + 1, x^2 - 3]) sage: chi = DirichletGroup(7, K).0 sage: chi.minimize_base_ring() Dirichlet character modulo 7 of conductor 7 mapping 3 |--> -1/2*b*a + 1/2 """ R = self.base_ring() if R.is_prime_field(): return self p = R.characteristic() if p: K = rings.IntegerModRing(p) elif self.order() <= 2: K = rings.QQ elif (isinstance(R, number_field.NumberField_generic) and euler_phi(self.order()) < R.absolute_degree()): K = rings.CyclotomicField(self.order()) else: return self try: return self.change_ring(K) except (TypeError, ValueError, ArithmeticError): return self def modulus(self): """ The modulus of this character. EXAMPLES:: sage: e = DirichletGroup(100, QQ).0 sage: e.modulus() 100 sage: e.conductor() 4 """ return self.parent().modulus() def level(self): """ Synonym for modulus. EXAMPLES:: sage: e = DirichletGroup(100, QQ).0 sage: e.level() 100 """ return self.modulus() @cached_method def multiplicative_order(self): """ The order of this character. EXAMPLES:: sage: e = DirichletGroup(100).1 sage: e.order() # same as multiplicative_order, since group is multiplicative 20 sage: e.multiplicative_order() 20 sage: e = DirichletGroup(100).0 sage: e.multiplicative_order() 2 """ if self.parent().zeta.is_in_cache(): return self.element().additive_order() return lcm([z.multiplicative_order() for z in self.values_on_gens()]) def primitive_character(self): """ Returns the primitive character associated to self. EXAMPLES:: sage: e = DirichletGroup(100).0; e Dirichlet character modulo 100 of conductor 4 mapping 51 |--> -1, 77 |--> 1 sage: e.conductor() 4 sage: f = e.primitive_character(); f Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1 sage: f.modulus() 4 """ return self.restrict(self.conductor()) def restrict(self, M): """ Returns the restriction of this character to a Dirichlet character modulo the divisor M of the modulus, which must also be a multiple of the conductor of this character. EXAMPLES:: sage: e = DirichletGroup(100).0 sage: e.modulus() 100 sage: e.conductor() 4 sage: e.restrict(20) Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1 sage: e.restrict(4) Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1 sage: e.restrict(50) Traceback (most recent call last): ... ValueError: conductor(=4) must divide M(=50) """ M = int(M) if self.modulus()%M != 0: raise ValueError("M(=%s) must divide the modulus(=%s)"%(M,self.modulus())) if M%self.conductor() != 0: raise ValueError("conductor(=%s) must divide M(=%s)"%(self.conductor(),M)) H = DirichletGroup(M, self.base_ring()) return H(self) @cached_method def values(self): """ Return a list of the values of this character on each integer between 0 and the modulus. EXAMPLES:: sage: e = DirichletGroup(20)(1) sage: e.values() [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1] sage: e = DirichletGroup(20).gen(0) sage: e.values() [0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1] sage: e = DirichletGroup(20).gen(1) sage: e.values() [0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1] sage: e = DirichletGroup(21).gen(0) ; e.values() [0, 1, -1, 0, 1, -1, 0, 0, -1, 0, 1, -1, 0, 1, 0, 0, 1, -1, 0, 1, -1] sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values() [0, 1, 36, 0, 1, 36, 0, 0, 36, 0, 1, 36, 0, 1, 0, 0, 1, 36, 0, 1, 36] sage: e = DirichletGroup(21, base_ring=GF(3)).gen(0) ; e.values() [0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 2] :: sage: chi = DirichletGroup(100151, CyclotomicField(10)).0 sage: ls = chi.values() ; ls[0:10] [0, 1, -zeta10^3, -zeta10, -zeta10, 1, zeta10^3 - zeta10^2 + zeta10 - 1, zeta10, zeta10^3 - zeta10^2 + zeta10 - 1, zeta10^2] TESTS: Test that :trac:`11783` and :trac:`14368` are fixed:: sage: chi = DirichletGroup(1).list()[0] sage: chi.values() [1] sage: chi(1) 1 """ G = self.parent() R = G.base_ring() mod = self.parent().modulus() if mod == 1: return [R.one()] elif mod == 2: return [R.zero(), R.one()] result_list = [R.zero()] * mod gens = G.unit_gens() orders = G.integers_mod().unit_group().gens_orders() R_values = G._zeta_powers val_on_gen = self.element() exponents = [0] * len(orders) n = G.integers_mod().one() value = val_on_gen.base_ring().zero() while True: # record character value on n result_list[n] = R_values[value] # iterate: # increase the exponent vector by 1, # increase n accordingly, and increase value i = 0 while True: try: exponents[i] += 1 except IndexError: # Done! return result_list value += val_on_gen[i] n *= gens[i] if exponents[i] < orders[i]: break exponents[i] = 0 i += 1 @cached_method(do_pickle=True) def values_on_gens(self): r""" Return a tuple of the values of ``self`` on the standard generators of `(\ZZ/N\ZZ)^*`, where `N` is the modulus. EXAMPLES:: sage: e = DirichletGroup(16)([-1, 1]) sage: e.values_on_gens () (-1, 1) .. NOTE:: The constructor of :class:`DirichletCharacter` sets the cache of :meth:`element` or of :meth:`values_on_gens`. The cache of one of these methods needs to be set for the other method to work properly, these caches have to be stored when pickling an instance of :class:`DirichletCharacter`. """ pows = self.parent()._zeta_powers return tuple([pows[i] for i in self.element()]) @cached_method(do_pickle=True) def element(self): r""" Return the underlying `\ZZ/n\ZZ`-module vector of exponents. .. warning:: Please do not change the entries of the returned vector; this vector is mutable *only* because immutable vectors are not implemented yet. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.element() (2, 0) sage: b.element() (0, 1) .. NOTE:: The constructor of :class:`DirichletCharacter` sets the cache of :meth:`element` or of :meth:`values_on_gens`. The cache of one of these methods needs to be set for the other method to work properly, these caches have to be stored when pickling an instance of :class:`DirichletCharacter`. """ P = self.parent() M = P._module if is_ComplexField(P.base_ring()): zeta = P.zeta() zeta_argument = zeta.argument() v = M([int(round(x.argument() / zeta_argument)) for x in self.values_on_gens()]) else: dlog = P._zeta_dlog v = M([dlog[x] for x in self.values_on_gens()]) v.set_immutable() return v def __setstate__(self, state): r""" Restore a pickled element from ``state``. TESTS:: sage: e = DirichletGroup(16)([-1, 1]) sage: loads(dumps(e)) == e True """ # values_on_gens() used an explicit cache __values_on_gens in the past # we need to set the cache of values_on_gens() from that if we encounter it in a pickle values_on_gens_key = '_DirichletCharacter__values_on_gens' values_on_gens = None state_dict = state[1] if values_on_gens_key in state_dict: values_on_gens = state_dict[values_on_gens_key] del state_dict[values_on_gens_key] # element() used an explicit cache __element in the past # we need to set the cache of element() from that if we encounter it in a pickle element_key = '_DirichletCharacter__element' element = None if element_key in state_dict: element = state_dict[element_key] del state_dict[element_key] super(DirichletCharacter, self).__setstate__(state) if values_on_gens is not None: self.values_on_gens.set_cache(values_on_gens) if element is not None: self.element.set_cache(element) class DirichletGroupFactory(UniqueFactory): r""" Construct a group of Dirichlet characters modulo `N`. INPUT: - ``N`` -- positive integer - ``base_ring`` -- commutative ring; the value ring for the characters in this group (default: the cyclotomic field `\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`) - ``zeta`` -- (optional) root of unity in ``base_ring`` - ``zeta_order`` -- (optional) positive integer; this must be the order of ``zeta`` if both are specified - ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)`` notation works) - ``integral`` -- boolean (default: ``False``); whether to replace the default cyclotomic field by its rings of integers as the base ring. This is ignored if ``base_ring`` is not ``None``. OUTPUT: The group of Dirichlet characters modulo `N` with values in a subgroup `V` of the multiplicative group `R^*` of ``base_ring``. This is the group of homomorphisms `(\ZZ/N\ZZ)^* \to V` with pointwise multiplication. The group `V` is determined as follows: - If both ``zeta`` and ``zeta_order`` are omitted, then `V` is taken to be `R^*`, or equivalently its `n`-torsion subgroup, where `n` is the exponent of `(\ZZ/N\ZZ)^*`. Many operations, such as finding a set of generators for the group, are only implemented if `V` is cyclic and a generator for `V` can be found. - If ``zeta`` is specified, then `V` is taken to be the cyclic subgroup of `R^*` generated by ``zeta``. If ``zeta_order`` is also given, it must be the multiplicative order of ``zeta``; this is useful if the base ring is not exact or if the order of ``zeta`` is very large. - If ``zeta`` is not specified but ``zeta_order`` is, then `V` is taken to be the group of roots of unity of order dividing ``zeta_order`` in `R`. In this case, `R` must be a domain (so `V` is cyclic), and `V` must have order ``zeta_order``. Furthermore, a generator ``zeta`` of `V` is computed, and an error is raised if such ``zeta`` cannot be found. EXAMPLES: The default base ring is a cyclotomic field of order the exponent of `(\ZZ/N\ZZ)^*`:: sage: DirichletGroup(20) Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2 We create the group of Dirichlet character mod 20 with values in the rational numbers:: sage: G = DirichletGroup(20, QQ); G Group of Dirichlet characters modulo 20 with values in Rational Field sage: G.order() 4 sage: G.base_ring() Rational Field The elements of G print as lists giving the values of the character on the generators of `(Z/NZ)^*`:: sage: list(G) [Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1, Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1, Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1] Next we construct the group of Dirichlet character mod 20, but with values in `\QQ(\zeta_n)`:: sage: G = DirichletGroup(20) sage: G.1 Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4 We next compute several invariants of ``G``:: sage: G.gens() (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4) sage: G.unit_gens() (11, 17) sage: G.zeta() zeta4 sage: G.zeta_order() 4 In this example we create a Dirichlet group with values in a number field:: sage: R.<x> = PolynomialRing(QQ) sage: K.<a> = NumberField(x^4 + 1) sage: DirichletGroup(5, K) Group of Dirichlet characters modulo 5 with values in Number Field in a with defining polynomial x^4 + 1 An example where we give ``zeta``, but not its order:: sage: G = DirichletGroup(5, K, a); G Group of Dirichlet characters modulo 5 with values in the group of order 8 generated by a in Number Field in a with defining polynomial x^4 + 1 sage: G.list() [Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> a^2, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -a^2] We can also restrict the order of the characters, either with or without specifying a root of unity:: sage: DirichletGroup(5, K, zeta=-1, zeta_order=2) Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1 sage: DirichletGroup(5, K, zeta_order=2) Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1 :: sage: G.<e> = DirichletGroup(13) sage: loads(G.dumps()) == G True :: sage: G = DirichletGroup(19, GF(5)) sage: loads(G.dumps()) == G True We compute a Dirichlet group over a large prime field:: sage: p = next_prime(10^40) sage: g = DirichletGroup(19, GF(p)); g Group of Dirichlet characters modulo 19 with values in Finite Field of size 10000000000000000000000000000000000000121 Note that the root of unity has small order, i.e., it is not the largest order root of unity in the field:: sage: g.zeta_order() 2 :: sage: r4 = CyclotomicField(4).ring_of_integers() sage: G = DirichletGroup(60, r4) sage: G.gens() (Dirichlet character modulo 60 of conductor 4 mapping 31 |--> -1, 41 |--> 1, 37 |--> 1, Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1, Dirichlet character modulo 60 of conductor 5 mapping 31 |--> 1, 41 |--> 1, 37 |--> zeta4) sage: val = G.gens()[2].values_on_gens()[2] ; val zeta4 sage: parent(val) Gaussian Integers in Cyclotomic Field of order 4 and degree 2 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) 17 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) * GF(29)(3) 22 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3 22 sage: parent(r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3) Residue field of Fractional ideal (-2*zeta4 + 5) :: sage: DirichletGroup(60, integral=True) Group of Dirichlet characters modulo 60 with values in Gaussian Integers in Cyclotomic Field of order 4 and degree 2 sage: parent(DirichletGroup(60, integral=True).gens()[2].values_on_gens()[2]) Gaussian Integers in Cyclotomic Field of order 4 and degree 2 If the order of ``zeta`` cannot be determined automatically, we can specify it using ``zeta_order``:: sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6)) Traceback (most recent call last): ... NotImplementedError: order of element not known sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6) Group of Dirichlet characters modulo 7 with values in the group of order 6 generated by 0.500000000000000 + 0.866025403784439*I in Complex Field with 53 bits of precision If the base ring is not a domain (in which case the group of roots of unity is not necessarily cyclic), some operations still work, such as creation of elements:: sage: G = DirichletGroup(5, Zmod(15)); G Group of Dirichlet characters modulo 5 with values in Ring of integers modulo 15 sage: chi = G([13]); chi Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13 sage: chi^2 Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 4 sage: chi.multiplicative_order() 4 Other operations only work if ``zeta`` is specified:: sage: G.gens() Traceback (most recent call last): ... NotImplementedError: factorization of polynomials over rings with composite characteristic is not implemented sage: G = DirichletGroup(5, Zmod(15), zeta=2); G Group of Dirichlet characters modulo 5 with values in the group of order 4 generated by 2 in Ring of integers modulo 15 sage: G.gens() (Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 2,) TESTS: Dirichlet groups are cached, creating two groups with the same parameters yields the same object:: sage: DirichletGroup(60) is DirichletGroup(60) True """ def create_key(self, N, base_ring=None, zeta=None, zeta_order=None, names=None, integral=False): """ Create a key that uniquely determines a Dirichlet group. TESTS:: sage: DirichletGroup.create_key(60) (Cyclotomic Field of order 4 and degree 2, 60, None, None) An example to illustrate that ``base_ring`` is a part of the key:: sage: k = DirichletGroup.create_key(2, base_ring=QQ); k (Rational Field, 2, None, None) sage: l = DirichletGroup.create_key(2, base_ring=CC); l (Complex Field with 53 bits of precision, 2, None, None) sage: k == l False sage: G = DirichletGroup.create_object(None, k); G Group of Dirichlet characters modulo 2 with values in Rational Field sage: H = DirichletGroup.create_object(None, l); H Group of Dirichlet characters modulo 2 with values in Complex Field with 53 bits of precision sage: G == H False If ``base_ring`` was not be a part of the key, the keys would compare equal and the caching would be broken:: sage: k = k[1:]; k (2, None, None) sage: l = l[1:]; l (2, None, None) sage: k == l True sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2, base_ring=CC) False If the base ring is not an integral domain, an error will be raised if only ``zeta_order`` is specified:: sage: DirichletGroup(17, Integers(15)) Group of Dirichlet characters modulo 17 with values in Ring of integers modulo 15 sage: DirichletGroup(17, Integers(15), zeta_order=4) Traceback (most recent call last): ... ValueError: base ring (= Ring of integers modulo 15) must be an integral domain if only zeta_order is specified sage: G = DirichletGroup(17, Integers(15), zeta=7); G Group of Dirichlet characters modulo 17 with values in the group of order 4 generated by 7 in Ring of integers modulo 15 sage: G.order() 4 sage: DirichletGroup(-33) Traceback (most recent call last): ... ValueError: modulus should be positive """ modulus = rings.Integer(N) if modulus <= 0: raise ValueError('modulus should be positive') if base_ring is None: if not (zeta is None and zeta_order is None): raise ValueError("zeta and zeta_order must be None if base_ring not specified") e = rings.IntegerModRing(modulus).unit_group_exponent() base_ring = rings.CyclotomicField(e) if integral: base_ring = base_ring.ring_of_integers() if not is_Ring(base_ring): raise TypeError("base_ring (= %s) must be a ring" % base_ring) # If either zeta or zeta_order is given, compute the other. if zeta is not None: zeta = base_ring(zeta) if zeta_order is None: zeta_order = zeta.multiplicative_order() elif zeta_order is not None: if not base_ring.is_integral_domain(): raise ValueError("base ring (= %s) must be an integral domain if only zeta_order is specified" % base_ring) zeta_order = rings.Integer(zeta_order) zeta = base_ring.zeta(zeta_order) return (base_ring, modulus, zeta, zeta_order) def create_object(self, version, key, **extra_args): """ Create the object from the key (extra arguments are ignored). This is only called if the object was not found in the cache. TESTS:: sage: K = CyclotomicField(4) sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4)) Group of Dirichlet characters modulo 60 with values in the group of order 4 generated by zeta4 in Cyclotomic Field of order 4 and degree 2 """ base_ring, modulus, zeta, zeta_order = key return DirichletGroup_class(base_ring, modulus, zeta, zeta_order) DirichletGroup = DirichletGroupFactory("DirichletGroup") def is_DirichletGroup(x): """ Returns True if x is a Dirichlet group. EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletGroup sage: is_DirichletGroup(DirichletGroup(11)) True sage: is_DirichletGroup(11) False sage: is_DirichletGroup(DirichletGroup(11).0) False """ return isinstance(x, DirichletGroup_class) class DirichletGroup_class(WithEqualityById, Parent): """ Group of Dirichlet characters modulo `N` with values in a ring `R`. """ Element = DirichletCharacter def __init__(self, base_ring, modulus, zeta, zeta_order): """ Create a Dirichlet group. Not to be called directly (use the factory function ``DirichletGroup``). The ``DirichletGroup`` factory ensures that either both ``zeta`` and ``zeta_order`` are specified, or that both are ``None``. In the former case, it also ensures that ``zeta`` is an element of ``base_ring`` and that ``zeta_order`` is an element of ``ZZ``. TESTS:: sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2) # indirect doctest sage: TestSuite(G).run() sage: G.base() # check that Parent.__init__ has been called Ring of integers modulo 9 sage: DirichletGroup(13) == DirichletGroup(13) True sage: DirichletGroup(13) == DirichletGroup(13, QQ) False """ from sage.categories.groups import Groups category = Groups().Commutative() if base_ring.is_integral_domain() or base_ring.is_finite(): # The group of n-th roots of unity in the base ring is # finite, and hence this Dirichlet group is finite too. # In particular, it is finitely generated; the added # FinitelyGenerated() here means that the group has a # distinguished set of generators. category = category.Finite().FinitelyGenerated() Parent.__init__(self, base_ring, category=category) self._zeta = zeta self._zeta_order = zeta_order self._modulus = modulus self._integers = rings.IntegerModRing(modulus) def __setstate__(self, state): """ Used for unpickling old instances. TESTS:: sage: G = DirichletGroup(9) sage: loads(dumps(G)) is G True """ self._set_element_constructor() if '_zeta_order' in state: state['_zeta_order'] = rings.Integer(state['_zeta_order']) super(DirichletGroup_class, self).__setstate__(state) @property def _module(self): """ Return the free module used to represent Dirichlet characters. TESTS:: sage: DirichletGroup(12)._module Vector space of dimension 2 over Ring of integers modulo 2 """ return free_module.FreeModule(rings.IntegerModRing(self.zeta_order()), len(self.unit_gens())) @property def _zeta_powers(self): """ Return a list of powers of the distinguished root of unity. TESTS:: sage: DirichletGroup(5)._zeta_powers [1, zeta4, -1, -zeta4] """ R = self.base_ring() a = R.one() w = [a] zeta = self.zeta() zeta_order = self.zeta_order() if is_ComplexField(R): for i in range(1, zeta_order): a = a * zeta a._set_multiplicative_order(zeta_order/gcd(zeta_order, i)) w.append(a) else: for i in range(1, zeta_order): a = a * zeta w.append(a) return w @property def _zeta_dlog(self): """ Return a dictionary that can be used to compute discrete logarithms in the value group of this Dirichlet group. TESTS:: sage: DirichletGroup(5)._zeta_dlog {-1: 2, -zeta4: 3, zeta4: 1, 1: 0} """ return {z: i for i, z in enumerate(self._zeta_powers)} def change_ring(self, R, zeta=None, zeta_order=None): """ Return the base extension of ``self`` to ``R``. INPUT: - ``R`` -- either a ring admitting a conversion map from the base ring of ``self``, or a ring homomorphism with the base ring of ``self`` as its domain - ``zeta`` -- (optional) root of unity in ``R`` - ``zeta_order`` -- (optional) order of ``zeta`` EXAMPLES:: sage: G = DirichletGroup(7,QQ); G Group of Dirichlet characters modulo 7 with values in Rational Field sage: G.change_ring(CyclotomicField(6)) Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2 TESTS: We test the case where `R` is a map (:trac:`18072`):: sage: K.<i> = QuadraticField(-1) sage: f = K.complex_embeddings()[0] sage: D = DirichletGroup(5, K) sage: D.change_ring(f) Group of Dirichlet characters modulo 5 with values in Complex Field with 53 bits of precision """ if zeta is None and self._zeta is not None: # A root of unity was explicitly given; we use it over the # new base ring as well. zeta = self._zeta if zeta_order is None: # We reuse _zeta_order if we know that it stays the # same; otherwise it will be recomputed as the order # of R(zeta) by the DirichletGroup factory. p = R.characteristic() if p == 0 or p.gcd(self._zeta_order) == 1: zeta_order = self._zeta_order else: # No root of unity specified; use the same zeta_order # (which may still be None). zeta_order = self._zeta_order # Map zeta to the new parent if zeta is not None: zeta = R(zeta) if isinstance(R, Map): R = R.codomain() return DirichletGroup(self.modulus(), R, zeta=zeta, zeta_order=zeta_order) def base_extend(self, R): """ Return the base extension of ``self`` to ``R``. INPUT: - ``R`` -- either a ring admitting a *coercion* map from the base ring of ``self``, or a ring homomorphism with the base ring of ``self`` as its domain EXAMPLES:: sage: G = DirichletGroup(7,QQ); G Group of Dirichlet characters modulo 7 with values in Rational Field sage: H = G.base_extend(CyclotomicField(6)); H Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2 Note that the root of unity can change:: sage: H.zeta() zeta6 This method (in contrast to :meth:`change_ring`) requires a coercion map to exist:: sage: G.base_extend(ZZ) Traceback (most recent call last): ... TypeError: no coercion map from Rational Field to Integer Ring is defined Base-extended Dirichlet groups do not silently get roots of unity with smaller order than expected (:trac:`6018`):: sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4)) sage: H = DirichletGroup(10, CyclotomicField(4)) sage: G is H True sage: G3 = DirichletGroup(31, CyclotomicField(3)) sage: G5 = DirichletGroup(31, CyclotomicField(5)) sage: K30 = CyclotomicField(30) sage: G3.gen(0).base_extend(K30) * G5.gen(0).base_extend(K30) Dirichlet character modulo 31 of conductor 31 mapping 3 |--> -zeta30^7 + zeta30^5 + zeta30^4 + zeta30^3 - zeta30 - 1 When a root of unity is specified, base extension still works if the new base ring is not an integral domain:: sage: f = DirichletGroup(17, ZZ, zeta=-1).0 sage: g = f.base_extend(Integers(15)) sage: g(3) 14 sage: g.parent().zeta() 14 """ if not (isinstance(R, Map) or R.has_coerce_map_from(self.base_ring())): raise TypeError("no coercion map from %s to %s is defined" % (self.base_ring(), R)) return self.change_ring(R) def _element_constructor_(self, x): """ Construct a Dirichlet character from `x`. EXAMPLES:: sage: G = DirichletGroup(13) sage: K = G.base_ring() sage: G(1) Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1 sage: G([-1]) Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1 sage: G([K.0]) Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12 sage: G(0) Traceback (most recent call last): ... TypeError: cannot convert 0 to an element of Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4 sage: G = DirichletGroup(6) sage: G(DirichletGroup(3).0) Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1 sage: G(DirichletGroup(15).0) Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1 sage: G(DirichletGroup(15).1) Traceback (most recent call last): ... TypeError: conductor must divide modulus sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1) Traceback (most recent call last): ... TypeError: Unable to coerce zeta4 to a rational """ R = self.base_ring() try: if x == R.one(): x = [R.one()] * len(self.unit_gens()) except (TypeError, ValueError, ArithmeticError): pass if isinstance(x, list): # list of values on each unit generator return self.element_class(self, x) elif not isinstance(x, DirichletCharacter): raise TypeError("cannot convert %s to an element of %s" % (x, self)) elif not x.conductor().divides(self.modulus()): raise TypeError("conductor must divide modulus") a = [] for u in self.unit_gens(): v = u.lift() # have to do this, since e.g., unit gens mod 11 are not units mod 22. while x.modulus().gcd(v) != 1: v += self.modulus() a.append(R(x(v))) return self.element_class(self, a) def _coerce_map_from_(self, X): """ Decide whether there is a coercion map from `X`. There is conversion between Dirichlet groups of different moduli, but no coercion. This implies that Dirichlet characters of different moduli do not compare as equal. TESTS:: sage: trivial_character(6) == trivial_character(3) # indirect doctest False sage: trivial_character(3) == trivial_character(9) False sage: trivial_character(3) == DirichletGroup(3, QQ).0^2 True """ return (isinstance(X, DirichletGroup_class) and self.modulus() == X.modulus() and self.base_ring().has_coerce_map_from(X.base_ring()) and (self._zeta is None or (X._zeta is not None and self.base_ring()(X._zeta) in self._zeta_powers))) def __len__(self): """ Return the number of elements of this Dirichlet group. This is the same as self.order(). EXAMPLES:: sage: len(DirichletGroup(20)) 8 sage: len(DirichletGroup(20, QQ)) 4 sage: len(DirichletGroup(20, GF(5))) 8 sage: len(DirichletGroup(20, GF(2))) 1 sage: len(DirichletGroup(20, GF(3))) 4 """ return self.order() def _repr_(self): """ Return a print representation of this group, which can be renamed. EXAMPLES:: sage: G = DirichletGroup(11) sage: repr(G) # indirect doctest 'Group of Dirichlet characters modulo 11 with values in Cyclotomic Field of order 10 and degree 4' sage: G.rename('Dir(11)') sage: G Dir(11) """ s = "Group of Dirichlet characters modulo %s with values in " % self.modulus() if self._zeta is not None: s += "the group of order %s generated by %s in " % (self._zeta_order, self._zeta) s += str(self.base_ring()) return s @cached_method def decomposition(self): r""" Returns the Dirichlet groups of prime power modulus corresponding to primes dividing modulus. (Note that if the modulus is 2 mod 4, there will be a "factor" of `(\ZZ/2\ZZ)^*`, which is the trivial group.) EXAMPLES:: sage: DirichletGroup(20).decomposition() [ Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2, Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2 ] sage: DirichletGroup(20,GF(5)).decomposition() [ Group of Dirichlet characters modulo 4 with values in Finite Field of size 5, Group of Dirichlet characters modulo 5 with values in Finite Field of size 5 ] """ R = self.base_ring() return Sequence([DirichletGroup(p**r,R) for p, r \ in factor(self.modulus())], cr=True, universe = cat.Objects()) def exponent(self): """ Return the exponent of this group. EXAMPLES:: sage: DirichletGroup(20).exponent() 4 sage: DirichletGroup(20,GF(3)).exponent() 2 sage: DirichletGroup(20,GF(2)).exponent() 1 sage: DirichletGroup(37).exponent() 36 """ return self.zeta_order() @cached_method def _automorphisms(self): """ Compute the automorphisms of self. These are always given by raising to a power, so the return value is a list of integers. At present this is only implemented if the base ring has characteristic 0 or a prime. EXAMPLES:: sage: DirichletGroup(17)._automorphisms() [1, 3, 5, 7, 9, 11, 13, 15] sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms() [1, 11, 121, 1331] sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms() Traceback (most recent call last): ... NotImplementedError: Automorphisms for finite non-field base rings not implemented sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms() Traceback (most recent call last): ... NotImplementedError: Automorphisms for finite non-field base rings not implemented """ n = self.zeta_order() R = self.base_ring() p = R.characteristic() if p == 0: Auts = [e for e in range(1,n) if gcd(e,n) == 1] else: if not rings.ZZ(p).is_prime(): raise NotImplementedError("Automorphisms for finite non-field base rings not implemented") # The automorphisms in characteristic p are # k-th powering for # k = 1, p, p^2, ..., p^(r-1), # where p^r = 1 (mod n), so r is the mult order of p modulo n. r = rings.IntegerModRing(n)(p).multiplicative_order() Auts = [p**m for m in range(0,r)] return Auts def galois_orbits(self, v=None, reps_only=False, sort=True, check=True): """ Return a list of the Galois orbits of Dirichlet characters in self, or in v if v is not None. INPUT: - ``v`` - (optional) list of elements of self - ``reps_only`` - (optional: default False) if True only returns representatives for the orbits. - ``sort`` - (optional: default True) whether to sort the list of orbits and the orbits themselves (slightly faster if False). - ``check`` - (optional, default: True) whether or not to explicitly coerce each element of v into self. The Galois group is the absolute Galois group of the prime subfield of Frac(R). If R is not a domain, an error will be raised. EXAMPLES:: sage: DirichletGroup(20).galois_orbits() [ [Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1], ..., [Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1] ] sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits() Traceback (most recent call last): ... TypeError: Galois orbits only defined if base ring is an integral domain sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits() Traceback (most recent call last): ... TypeError: Galois orbits only defined if base ring is an integral domain """ if v is None: v = self.list() else: if check: v = [self(x) for x in v] G = [] seen_so_far = set([]) for x in v: z = x.element() e = tuple(z) # change when there are immutable vectors (and below) if e in seen_so_far: continue orbit = x.galois_orbit(sort=sort) if reps_only: G.append(x) else: G.append(orbit) for z in orbit: seen_so_far.add(tuple(z.element())) G = Sequence(G, cr=True) if sort: G.sort() return G def gen(self, n=0): """ Return the n-th generator of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.gen(0) Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1 sage: G.gen(1) Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4 sage: G.gen(2) Traceback (most recent call last): ... IndexError: n(=2) must be between 0 and 1 :: sage: G.gen(-1) Traceback (most recent call last): ... IndexError: n(=-1) must be between 0 and 1 """ n = int(n) g = self.gens() if n<0 or n>=len(g): raise IndexError("n(=%s) must be between 0 and %s"%(n,len(g)-1)) return g[n] @cached_method def gens(self): """ Returns generators of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.gens() (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4) """ g = [] ord = self.zeta_order() M = self._module zero = M(0) orders = self.integers_mod().unit_group().gens_orders() for i in range(len(self.unit_gens())): z = zero.__copy__() z[i] = ord//gcd(ord, orders[i]) g.append(self.element_class(self, z, check=False)) return tuple(g) def integers_mod(self): r""" Returns the group of integers `\ZZ/N\ZZ` where `N` is the modulus of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.integers_mod() Ring of integers modulo 20 """ return self._integers __iter__ = multiplicative_iterator def list(self): """ Return a list of the Dirichlet characters in this group. EXAMPLES:: sage: DirichletGroup(5).list() [Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4] """ return self._list_from_iterator() def modulus(self): """ Returns the modulus of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.modulus() 20 """ return self._modulus def ngens(self): """ Returns the number of generators of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.ngens() 2 """ return len(self.gens()) @cached_method def order(self): """ Return the number of elements of self. This is the same as len(self). EXAMPLES:: sage: DirichletGroup(20).order() 8 sage: DirichletGroup(37).order() 36 """ ord = rings.Integer(1) for g in self.gens(): ord *= int(g.order()) return ord def random_element(self): """ Return a random element of self. The element is computed by multiplying a random power of each generator together, where the power is between 0 and the order of the generator minus 1, inclusive. EXAMPLES:: sage: DirichletGroup(37).random_element() Dirichlet character modulo 37 of conductor 37 mapping 2 |--> zeta36^4 sage: DirichletGroup(20).random_element() Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1 sage: DirichletGroup(60).random_element() Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1 """ e = self(1) for i in range(self.ngens()): g = self.gen(i) n = random.randrange(g.order()) e *= g**n return e def unit_gens(self): r""" Returns the minimal generators for the units of `(\ZZ/N\ZZ)^*`, where `N` is the modulus of self. EXAMPLES:: sage: DirichletGroup(37).unit_gens() (2,) sage: DirichletGroup(20).unit_gens() (11, 17) sage: DirichletGroup(60).unit_gens() (31, 41, 37) sage: DirichletGroup(20,QQ).unit_gens() (11, 17) """ return self._integers.unit_gens() @cached_method def zeta(self): """ Return the chosen root of unity in the base ring. EXAMPLES:: sage: DirichletGroup(37).zeta() zeta36 sage: DirichletGroup(20).zeta() zeta4 sage: DirichletGroup(60).zeta() zeta4 sage: DirichletGroup(60,QQ).zeta() -1 sage: DirichletGroup(60, GF(25,'a')).zeta() 2 """ zeta = self._zeta if zeta is None: R = self.base_ring() e = self._integers.unit_group_exponent() for d in reversed(e.divisors()): try: zeta = R.zeta(d) break except ValueError: pass self.zeta_order.set_cache(d) return zeta @cached_method def zeta_order(self): """ Return the order of the chosen root of unity in the base ring. EXAMPLES:: sage: DirichletGroup(20).zeta_order() 4 sage: DirichletGroup(60).zeta_order() 4 sage: DirichletGroup(60, GF(25,'a')).zeta_order() 4 sage: DirichletGroup(19).zeta_order() 18 """ order = self._zeta_order if order is None: order = self.zeta().multiplicative_order() return order
34.562563
378
0.545557
13,353
103,584
4.161312
0.072568
0.011698
0.0406
0.017781
0.462531
0.399687
0.342728
0.303081
0.278858
0.23581
0
0.049846
0.343431
103,584
2,996
379
34.574099
0.767181
0.598046
0
0.25
0
0.002688
0.059202
0.002134
0
0
0
0
0
1
0.104839
false
0.002688
0.038978
0
0.274194
0.001344
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5f3213a4f40ad41fa4289061fc1bfb9a560419
6,445
py
Python
src/biotite/file.py
danijoo/biotite
22072e64676e4e917236eac8493eed4c6a22cc33
[ "BSD-3-Clause" ]
208
2018-04-20T15:59:42.000Z
2022-03-22T07:47:12.000Z
src/biotite/file.py
danielmuthama/biotite
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
[ "BSD-3-Clause" ]
121
2017-11-15T14:52:07.000Z
2022-03-30T16:31:41.000Z
src/biotite/file.py
danielmuthama/biotite
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
[ "BSD-3-Clause" ]
49
2018-07-19T09:06:24.000Z
2022-03-23T17:21:34.000Z
# This source code is part of the Biotite package and is distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst' for further # information. __name__ = "biotite" __author__ = "Patrick Kunzmann" __all__ = ["File", "TextFile", "InvalidFileError"] import abc import io import warnings from .copyable import Copyable import copy class File(Copyable, metaclass=abc.ABCMeta): """ Base class for all file classes. The constructor creates an empty file, that can be filled with data using the class specific setter methods. Conversely, the class method :func:`read()` reads a file from disk (or a file-like object from other sources). In order to write the instance content into a file the :func:`write()` method is used. """ def __init__(self): # Support for deprecated instance method 'read()': # When creating an instance, the 'read()' class method is # replaced by the instance method, so that subsequent # 'read()' calls are delegated to the instance method self.read = self._deprecated_read @classmethod @abc.abstractmethod def read(cls, file): """ Parse a file (or file-like object). Parameters ---------- file : file-like object or str The file to be read. Alternatively a file path can be supplied. Returns ------- file_object : File An instance from the respective :class:`File` subclass representing the parsed file. """ pass def _deprecated_read(self, file, *args, **kwargs): """ Support for deprecated instance method :func:`read()`. Internally this calls the :func:`read()` class method and replaces the data in `self` with the data from the newly created :class:`File` object """ warnings.warn( "Instance method 'read()' is deprecated, " "use class method instead", DeprecationWarning ) cls = type(self) new_file = cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file): """ Write the contents of this :class:`File` object into a file. Parameters ---------- file_name : file-like object or str The file to be written to. Alternatively a file path can be supplied. """ pass class TextFile(File, metaclass=abc.ABCMeta): """ Base class for all line based text files. When reading a file, the text content is saved as list of strings, one for each line. When writing a file, this list is written into the file. Attributes ---------- lines : list List of string representing the lines in the text file. PROTECTED: Do not modify from outside. """ def __init__(self): super().__init__() self.lines = [] @classmethod def read(cls, file, *args, **kwargs): # File name if isinstance(file, str): with open(file, "r") as f: lines = f.read().splitlines() # File object else: if not is_text(file): raise TypeError("A file opened in 'text' mode is required") lines = file.read().splitlines() file_object = cls(*args, **kwargs) file_object.lines = lines return file_object @staticmethod def read_iter(file): """ Create an iterator over each line of the given text file. Parameters ---------- file : file-like object or str The file to be read. Alternatively a file path can be supplied. Yields ------ line : str The current line in the file. """ # File name if isinstance(file, str): with open(file, "r") as f: while True: line = f.readline() if not line: break yield line # File object else: if not is_text(file): raise TypeError("A file opened in 'text' mode is required") while True: line = file.readline() if not line: break yield line def write(self, file): """ Write the contents of this object into a file (or file-like object). Parameters ---------- file_name : file-like object or str The file to be written to. Alternatively a file path can be supplied. """ if isinstance(file, str): with open(file, "w") as f: f.write("\n".join(self.lines) + "\n") else: if not is_text(file): raise TypeError("A file opened in 'text' mode is required") file.write("\n".join(self.lines) + "\n") def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def __str__(self): return("\n".join(self.lines)) class InvalidFileError(Exception): """ Indicates that the file is not suitable for the requested action, either because the file does not contain the required data or because the file is malformed. """ pass def wrap_string(text, width): """ A much simpler and hence much more efficient version of `textwrap.wrap()`. This function simply wraps the given `text` after `width` characters, ignoring sentences, whitespaces, etc. """ lines = [] for i in range(0, len(text), width): lines.append(text[i : i+width]) return lines def is_binary(file): if isinstance(file, io.BufferedIOBase): return True # for file wrappers, e.g. 'TemporaryFile' elif hasattr(file, "file") and isinstance(file.file, io.BufferedIOBase): return True else: return False def is_text(file): if isinstance(file, io.TextIOBase): return True # for file wrappers, e.g. 'TemporaryFile' elif hasattr(file, "file") and isinstance(file.file, io.TextIOBase): return True else: return False
29.162896
76
0.565244
770
6,445
4.646753
0.280519
0.020961
0.02739
0.017887
0.37479
0.316098
0.304919
0.259922
0.248183
0.226942
0
0.000472
0.342126
6,445
220
77
29.295455
0.843396
0.407758
0
0.43617
0
0
0.079086
0
0
0
0
0
0
1
0.138298
false
0.031915
0.053191
0.010638
0.308511
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5fa0d5b2ac5b94aac410a26a9a516f09e6dcbd
4,918
py
Python
src/cms/views/push_notifications/push_notification_sender.py
mckinly/cms-django
c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca
[ "Apache-2.0" ]
null
null
null
src/cms/views/push_notifications/push_notification_sender.py
mckinly/cms-django
c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca
[ "Apache-2.0" ]
5
2021-02-10T02:41:20.000Z
2022-03-12T00:56:56.000Z
src/cms/views/push_notifications/push_notification_sender.py
mckinly/cms-django
c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca
[ "Apache-2.0" ]
null
null
null
""" Module for sending Push Notifications """ import logging import requests from django.conf import settings from ...models import PushNotificationTranslation from ...models import Region from ...constants import push_notifications as pnt_const logger = logging.getLogger(__name__) # pylint: disable=too-few-public-methods class PushNotificationSender: """ Sends push notifications via FCM HTTP API. Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json """ fcm_url = "https://fcm.googleapis.com/fcm/send" def __init__(self, push_notification): """ Load relevant push notification translations and prepare content for sending :param push_notification: the push notification that should be sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification """ self.push_notification = push_notification self.prepared_pnts = [] self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, ) if len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key = self.get_auth_key() def load_secondary_pnts(self): """ Load push notification translations in other languages """ secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt in secondary_pnts: if ( secondary_pnt.title == "" and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ): secondary_pnt.title = self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt) def is_valid(self): """ Check if all data for sending push notifications is available :return: all prepared push notification translations are valid :rtype: bool """ if self.auth_key is None: return False for pnt in self.prepared_pnts: if not pnt.title: logger.debug("%r has no title", pnt) return False return True @staticmethod def get_auth_key(): """ Get FCM API auth key :return: FCM API auth key :rtype: str """ fcm_auth_config_key = "fcm_auth_key" auth_key = settings.FCM_KEY if auth_key.exists(): logger.debug("Got fcm_auth_key from database") return auth_key.first().value logger.warning( "Could not get %r from configuration database", fcm_auth_config_key ) return None def send_pn(self, pnt): """ Send single push notification translation :param pnt: the prepared push notification translation to be sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of the :mod:`requests` library :rtype: ~requests.Response """ if settings.DEBUG: region_slug = Region.objects.get( id=settings.TEST_BLOG_ID ).slug # Testumgebung - prevent sending PNs to actual users in development else: region_slug = self.push_notification.region.slug payload = { "to": f"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}", "notification": {"title": pnt.title, "body": pnt.text}, "data": { "lanCode": pnt.language.slug, "city": self.push_notification.region.slug, }, } headers = {"Authorization": f"key={self.auth_key}"} return requests.post(self.fcm_url, json=payload, headers=headers) # pylint: disable=too-many-arguments def send_all(self): """ Send all prepared push notification translations :return: Success status :rtype: bool """ status = True for pnt in self.prepared_pnts: res = self.send_pn(pnt) if res.status_code == 200: logger.info("%r sent, FCM id: %r", pnt, res.json()["message_id"]) else: status = False logger.warning( "Received invalid response from FCM for %r, status: %r, body: %r", pnt, res.status_code, res.text, ) return status
34.391608
110
0.610207
543
4,918
5.355433
0.300184
0.126547
0.048143
0.022696
0.129986
0.082531
0.021321
0
0
0
0
0.001458
0.302765
4,918
142
111
34.633803
0.846603
0.242375
0
0.117647
0
0
0.107421
0.021369
0
0
0
0
0
1
0.070588
false
0
0.070588
0
0.247059
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a5fbb70b61ec5fc6c7b862f0da3b78b40dc8aa0
984
py
Python
tests/functional/index/create/test_03.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
tests/functional/index/create/test_03.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
tests/functional/index/create/test_03.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
#coding:utf-8 # # id: functional.index.create.03 # title: CREATE ASC INDEX # decription: CREATE ASC INDEX # # Dependencies: # CREATE DATABASE # CREATE TABLE # SHOW INDEX # tracker_id: # min_versions: [] # versions: 1.0 # qmid: functional.index.create.create_index_03 import pytest from firebird.qa import db_factory, isql_act, Action # version: 1.0 # resources: None substitutions_1 = [] init_script_1 = """CREATE TABLE t( a INTEGER); commit;""" db_1 = db_factory(sql_dialect=3, init=init_script_1) test_script_1 = """CREATE ASC INDEX test ON t(a); SHOW INDEX test;""" act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """TEST INDEX ON T(A)""" @pytest.mark.version('>=1.0') def test_1(act_1: Action): act_1.expected_stdout = expected_stdout_1 act_1.execute() assert act_1.clean_expected_stdout == act_1.clean_stdout
23.428571
70
0.654472
139
984
4.374101
0.381295
0.039474
0.069079
0.039474
0
0
0
0
0
0
0
0.03836
0.231707
984
41
71
24
0.765873
0.379065
0
0
0
0
0.183502
0
0
0
0
0
0.066667
1
0.066667
false
0
0.133333
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a678b6dfe1f80688ee851169cd059181b03b309
5,922
py
Python
electrum/dnssec.py
Jesusown/electrum
0df05dd914c823acae1828cad3b20bdeb13150e9
[ "MIT" ]
5,905
2015-01-02T17:05:36.000Z
2022-03-29T07:28:29.000Z
electrum/dnssec.py
Jesusown/electrum
0df05dd914c823acae1828cad3b20bdeb13150e9
[ "MIT" ]
6,097
2015-01-01T21:20:25.000Z
2022-03-31T23:55:01.000Z
electrum/dnssec.py
Jesusown/electrum
0df05dd914c823acae1828cad3b20bdeb13150e9
[ "MIT" ]
2,202
2015-01-02T18:31:25.000Z
2022-03-28T15:35:03.000Z
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2015 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Check DNSSEC trust chain. # Todo: verify expiration dates # # Based on # http://backreference.org/2010/11/17/dnssec-verification-with-dig/ # https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py import dns import dns.name import dns.query import dns.dnssec import dns.message import dns.resolver import dns.rdatatype import dns.rdtypes.ANY.NS import dns.rdtypes.ANY.CNAME import dns.rdtypes.ANY.DLV import dns.rdtypes.ANY.DNSKEY import dns.rdtypes.ANY.DS import dns.rdtypes.ANY.NSEC import dns.rdtypes.ANY.NSEC3 import dns.rdtypes.ANY.NSEC3PARAM import dns.rdtypes.ANY.RRSIG import dns.rdtypes.ANY.SOA import dns.rdtypes.ANY.TXT import dns.rdtypes.IN.A import dns.rdtypes.IN.AAAA from .logging import get_logger _logger = get_logger(__name__) # hard-coded trust anchors (root KSKs) trust_anchors = [ # KSK-2017: dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='), # KSK-2010: dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='), ] def _check_query(ns, sub, _type, keys): q = dns.message.make_query(sub, _type, want_dnssec=True) response = dns.query.tcp(q, ns, timeout=5) assert response.rcode() == 0, 'No answer' answer = response.answer assert len(answer) != 0, ('No DNS record found', sub, _type) assert len(answer) != 1, ('No DNSSEC record found', sub, _type) if answer[0].rdtype == dns.rdatatype.RRSIG: rrsig, rrset = answer elif answer[1].rdtype == dns.rdatatype.RRSIG: rrset, rrsig = answer else: raise Exception('No signature set in record') if keys is None: keys = {dns.name.from_text(sub):rrset} dns.dnssec.validate(rrset, rrsig, keys) return rrset def _get_and_validate(ns, url, _type): # get trusted root key root_rrset = None for dnskey_rr in trust_anchors: try: # Check if there is a valid signature for the root dnskey root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr}) break except dns.dnssec.ValidationFailure: # It's OK as long as one key validates continue if not root_rrset: raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS') keys = {dns.name.root: root_rrset} # top-down verification parts = url.split('.') for i in range(len(parts), 0, -1): sub = '.'.join(parts[i-1:]) name = dns.name.from_text(sub) # If server is authoritative, don't fetch DNSKEY query = dns.message.make_query(sub, dns.rdatatype.NS) response = dns.query.udp(query, ns, 3) assert response.rcode() == dns.rcode.NOERROR, "query error" rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0] rr = rrset[0] if rr.rdtype == dns.rdatatype.SOA: continue # get DNSKEY (self-signed) rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None) # get DS (signed by parent) ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys) # verify that a signed DS validates DNSKEY for ds in ds_rrset: for dnskey in rrset: htype = 'SHA256' if ds.digest_type == 2 else 'SHA1' good_ds = dns.dnssec.make_ds(name, dnskey, htype) if ds == good_ds: break else: continue break else: raise Exception("DS does not match DNSKEY") # set key for next iteration keys = {name: rrset} # get TXT record (signed by zone) rrset = _check_query(ns, url, _type, keys) return rrset def query(url, rtype): # 8.8.8.8 is Google's public DNS server nameservers = ['8.8.8.8'] ns = nameservers[0] try: out = _get_and_validate(ns, url, rtype) validated = True except Exception as e: _logger.info(f"DNSSEC error: {repr(e)}") out = dns.resolver.resolve(url, rtype) validated = False return out, validated
39.218543
418
0.700777
777
5,922
5.274131
0.365508
0.043924
0.050756
0.051
0.050756
0.015617
0.015617
0
0
0
0
0.035593
0.2077
5,922
150
419
39.48
0.837809
0.295002
0
0.141304
0
0.021739
0.224401
0.165577
0
0
0
0.006667
0.043478
1
0.032609
false
0
0.228261
0
0.293478
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a681bd50a01e317584f76158f59adbe05396fb6
61,870
py
Python
specs/d3d11.py
ds-hwang/apitrace
b74347ebae0d033a013c4de3efb0e9165e9cea8f
[ "MIT" ]
1
2017-06-07T15:28:36.000Z
2017-06-07T15:28:36.000Z
specs/d3d11.py
jciehl/apitrace
0e01acc36de14e9ca7c0ced258767ffb99ac96ea
[ "MIT" ]
null
null
null
specs/d3d11.py
jciehl/apitrace
0e01acc36de14e9ca7c0ced258767ffb99ac96ea
[ "MIT" ]
1
2021-05-21T18:27:29.000Z
2021-05-21T18:27:29.000Z
########################################################################## # # Copyright 2012 Jose Fonseca # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ##########################################################################/ from dxgi import * from d3dcommon import * from d3d11sdklayers import * HRESULT = MAKE_HRESULT([ "D3D11_ERROR_FILE_NOT_FOUND", "D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS", "D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS", "D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD", "D3DERR_INVALIDCALL", "D3DERR_WASSTILLDRAWING", ]) ID3D11DepthStencilState = Interface("ID3D11DepthStencilState", ID3D11DeviceChild) ID3D11BlendState = Interface("ID3D11BlendState", ID3D11DeviceChild) ID3D11RasterizerState = Interface("ID3D11RasterizerState", ID3D11DeviceChild) ID3D11Resource = Interface("ID3D11Resource", ID3D11DeviceChild) ID3D11Buffer = Interface("ID3D11Buffer", ID3D11Resource) ID3D11Texture1D = Interface("ID3D11Texture1D", ID3D11Resource) ID3D11Texture2D = Interface("ID3D11Texture2D", ID3D11Resource) ID3D11Texture3D = Interface("ID3D11Texture3D", ID3D11Resource) ID3D11View = Interface("ID3D11View", ID3D11DeviceChild) ID3D11ShaderResourceView = Interface("ID3D11ShaderResourceView", ID3D11View) ID3D11RenderTargetView = Interface("ID3D11RenderTargetView", ID3D11View) ID3D11DepthStencilView = Interface("ID3D11DepthStencilView", ID3D11View) ID3D11UnorderedAccessView = Interface("ID3D11UnorderedAccessView", ID3D11View) ID3D11VertexShader = Interface("ID3D11VertexShader", ID3D11DeviceChild) ID3D11HullShader = Interface("ID3D11HullShader", ID3D11DeviceChild) ID3D11DomainShader = Interface("ID3D11DomainShader", ID3D11DeviceChild) ID3D11GeometryShader = Interface("ID3D11GeometryShader", ID3D11DeviceChild) ID3D11PixelShader = Interface("ID3D11PixelShader", ID3D11DeviceChild) ID3D11ComputeShader = Interface("ID3D11ComputeShader", ID3D11DeviceChild) ID3D11InputLayout = Interface("ID3D11InputLayout", ID3D11DeviceChild) ID3D11SamplerState = Interface("ID3D11SamplerState", ID3D11DeviceChild) ID3D11Asynchronous = Interface("ID3D11Asynchronous", ID3D11DeviceChild) ID3D11Query = Interface("ID3D11Query", ID3D11Asynchronous) ID3D11Predicate = Interface("ID3D11Predicate", ID3D11Query) ID3D11Counter = Interface("ID3D11Counter", ID3D11Asynchronous) ID3D11ClassInstance = Interface("ID3D11ClassInstance", ID3D11DeviceChild) ID3D11ClassLinkage = Interface("ID3D11ClassLinkage", ID3D11DeviceChild) ID3D11CommandList = Interface("ID3D11CommandList", ID3D11DeviceChild) ID3D11Device = Interface("ID3D11Device", IUnknown) D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [ "D3D11_INPUT_PER_VERTEX_DATA", "D3D11_INPUT_PER_INSTANCE_DATA", ]) D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [ "D3D11_APPEND_ALIGNED_ELEMENT", ]) D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [ (LPCSTR, "SemanticName"), (UINT, "SemanticIndex"), (DXGI_FORMAT, "Format"), (UINT, "InputSlot"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, "AlignedByteOffset"), (D3D11_INPUT_CLASSIFICATION, "InputSlotClass"), (UINT, "InstanceDataStepRate"), ]) D3D11_FILL_MODE = Enum("D3D11_FILL_MODE", [ "D3D11_FILL_WIREFRAME", "D3D11_FILL_SOLID", ]) D3D11_PRIMITIVE_TOPOLOGY = Enum("D3D11_PRIMITIVE_TOPOLOGY", [ "D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED", "D3D11_PRIMITIVE_TOPOLOGY_POINTLIST", "D3D11_PRIMITIVE_TOPOLOGY_LINELIST", "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP", "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST", "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP", "D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ", "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ", "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ", "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ", "D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST", "D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST", ]) D3D11_PRIMITIVE = Enum("D3D11_PRIMITIVE", [ "D3D11_PRIMITIVE_UNDEFINED", "D3D11_PRIMITIVE_POINT", "D3D11_PRIMITIVE_LINE", "D3D11_PRIMITIVE_TRIANGLE", "D3D11_PRIMITIVE_LINE_ADJ", "D3D11_PRIMITIVE_TRIANGLE_ADJ", "D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH", "D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH", ]) D3D11_CULL_MODE = Enum("D3D11_CULL_MODE", [ "D3D11_CULL_NONE", "D3D11_CULL_FRONT", "D3D11_CULL_BACK", ]) D3D11_SO_DECLARATION_ENTRY = Struct("D3D11_SO_DECLARATION_ENTRY", [ (UINT, "Stream"), (LPCSTR, "SemanticName"), (UINT, "SemanticIndex"), (BYTE, "StartComponent"), (BYTE, "ComponentCount"), (BYTE, "OutputSlot"), ]) D3D11_VIEWPORT = Struct("D3D11_VIEWPORT", [ (FLOAT, "TopLeftX"), (FLOAT, "TopLeftY"), (FLOAT, "Width"), (FLOAT, "Height"), (FLOAT, "MinDepth"), (FLOAT, "MaxDepth"), ]) D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [ "D3D11_RESOURCE_DIMENSION_UNKNOWN", "D3D11_RESOURCE_DIMENSION_BUFFER", "D3D11_RESOURCE_DIMENSION_TEXTURE1D", "D3D11_RESOURCE_DIMENSION_TEXTURE2D", "D3D11_RESOURCE_DIMENSION_TEXTURE3D", ]) D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [ "D3D11_SRV_DIMENSION_UNKNOWN", "D3D11_SRV_DIMENSION_BUFFER", "D3D11_SRV_DIMENSION_TEXTURE1D", "D3D11_SRV_DIMENSION_TEXTURE1DARRAY", "D3D11_SRV_DIMENSION_TEXTURE2D", "D3D11_SRV_DIMENSION_TEXTURE2DARRAY", "D3D11_SRV_DIMENSION_TEXTURE2DMS", "D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY", "D3D11_SRV_DIMENSION_TEXTURE3D", "D3D11_SRV_DIMENSION_TEXTURECUBE", "D3D11_SRV_DIMENSION_TEXTURECUBEARRAY", "D3D11_SRV_DIMENSION_BUFFEREX", ]) D3D11_DSV_DIMENSION = Enum("D3D11_DSV_DIMENSION", [ "D3D11_DSV_DIMENSION_UNKNOWN", "D3D11_DSV_DIMENSION_TEXTURE1D", "D3D11_DSV_DIMENSION_TEXTURE1DARRAY", "D3D11_DSV_DIMENSION_TEXTURE2D", "D3D11_DSV_DIMENSION_TEXTURE2DARRAY", "D3D11_DSV_DIMENSION_TEXTURE2DMS", "D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY", ]) D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [ "D3D11_RTV_DIMENSION_UNKNOWN", "D3D11_RTV_DIMENSION_BUFFER", "D3D11_RTV_DIMENSION_TEXTURE1D", "D3D11_RTV_DIMENSION_TEXTURE1DARRAY", "D3D11_RTV_DIMENSION_TEXTURE2D", "D3D11_RTV_DIMENSION_TEXTURE2DARRAY", "D3D11_RTV_DIMENSION_TEXTURE2DMS", "D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY", "D3D11_RTV_DIMENSION_TEXTURE3D", ]) D3D11_UAV_DIMENSION = Enum("D3D11_UAV_DIMENSION", [ "D3D11_UAV_DIMENSION_UNKNOWN", "D3D11_UAV_DIMENSION_BUFFER", "D3D11_UAV_DIMENSION_TEXTURE1D", "D3D11_UAV_DIMENSION_TEXTURE1DARRAY", "D3D11_UAV_DIMENSION_TEXTURE2D", "D3D11_UAV_DIMENSION_TEXTURE2DARRAY", "D3D11_UAV_DIMENSION_TEXTURE3D", ]) D3D11_USAGE = Enum("D3D11_USAGE", [ "D3D11_USAGE_DEFAULT", "D3D11_USAGE_IMMUTABLE", "D3D11_USAGE_DYNAMIC", "D3D11_USAGE_STAGING", ]) D3D11_BIND_FLAG = Flags(UINT, [ "D3D11_BIND_VERTEX_BUFFER", "D3D11_BIND_INDEX_BUFFER", "D3D11_BIND_CONSTANT_BUFFER", "D3D11_BIND_SHADER_RESOURCE", "D3D11_BIND_STREAM_OUTPUT", "D3D11_BIND_RENDER_TARGET", "D3D11_BIND_DEPTH_STENCIL", "D3D11_BIND_UNORDERED_ACCESS", ]) D3D11_CPU_ACCESS_FLAG = Flags(UINT, [ "D3D11_CPU_ACCESS_WRITE", "D3D11_CPU_ACCESS_READ", ]) D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [ "D3D11_RESOURCE_MISC_GENERATE_MIPS", "D3D11_RESOURCE_MISC_SHARED", "D3D11_RESOURCE_MISC_TEXTURECUBE", "D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS", "D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS", "D3D11_RESOURCE_MISC_BUFFER_STRUCTURED", "D3D11_RESOURCE_MISC_RESOURCE_CLAMP", "D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX", "D3D11_RESOURCE_MISC_GDI_COMPATIBLE", ]) D3D11_MAP = Enum("D3D11_MAP", [ "D3D11_MAP_READ", "D3D11_MAP_WRITE", "D3D11_MAP_READ_WRITE", "D3D11_MAP_WRITE_DISCARD", "D3D11_MAP_WRITE_NO_OVERWRITE", ]) D3D11_MAP_FLAG = Flags(UINT, [ "D3D11_MAP_FLAG_DO_NOT_WAIT", ]) D3D11_RAISE_FLAG = Flags(UINT, [ "D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR", ]) D3D11_CLEAR_FLAG = Flags(UINT, [ "D3D11_CLEAR_DEPTH", "D3D11_CLEAR_STENCIL", ]) D3D11_RECT = Alias("D3D11_RECT", RECT) D3D11_BOX = Struct("D3D11_BOX", [ (UINT, "left"), (UINT, "top"), (UINT, "front"), (UINT, "right"), (UINT, "bottom"), (UINT, "back"), ]) ID3D11DeviceChild.methods += [ StdMethod(Void, "GetDevice", [Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice")]), StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]), StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]), StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]), ] D3D11_COMPARISON_FUNC = Enum("D3D11_COMPARISON_FUNC", [ "D3D11_COMPARISON_NEVER", "D3D11_COMPARISON_LESS", "D3D11_COMPARISON_EQUAL", "D3D11_COMPARISON_LESS_EQUAL", "D3D11_COMPARISON_GREATER", "D3D11_COMPARISON_NOT_EQUAL", "D3D11_COMPARISON_GREATER_EQUAL", "D3D11_COMPARISON_ALWAYS", ]) D3D11_DEPTH_WRITE_MASK = Enum("D3D11_DEPTH_WRITE_MASK", [ "D3D11_DEPTH_WRITE_MASK_ZERO", "D3D11_DEPTH_WRITE_MASK_ALL", ]) D3D11_STENCIL_OP = Enum("D3D11_STENCIL_OP", [ "D3D11_STENCIL_OP_KEEP", "D3D11_STENCIL_OP_ZERO", "D3D11_STENCIL_OP_REPLACE", "D3D11_STENCIL_OP_INCR_SAT", "D3D11_STENCIL_OP_DECR_SAT", "D3D11_STENCIL_OP_INVERT", "D3D11_STENCIL_OP_INCR", "D3D11_STENCIL_OP_DECR", ]) D3D11_DEPTH_STENCILOP_DESC = Struct("D3D11_DEPTH_STENCILOP_DESC", [ (D3D11_STENCIL_OP, "StencilFailOp"), (D3D11_STENCIL_OP, "StencilDepthFailOp"), (D3D11_STENCIL_OP, "StencilPassOp"), (D3D11_COMPARISON_FUNC, "StencilFunc"), ]) D3D11_DEPTH_STENCIL_DESC = Struct("D3D11_DEPTH_STENCIL_DESC", [ (BOOL, "DepthEnable"), (D3D11_DEPTH_WRITE_MASK, "DepthWriteMask"), (D3D11_COMPARISON_FUNC, "DepthFunc"), (BOOL, "StencilEnable"), (UINT8, "StencilReadMask"), (UINT8, "StencilWriteMask"), (D3D11_DEPTH_STENCILOP_DESC, "FrontFace"), (D3D11_DEPTH_STENCILOP_DESC, "BackFace"), ]) ID3D11DepthStencilState.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), "pDesc")]), ] D3D11_BLEND = Enum("D3D11_BLEND", [ "D3D11_BLEND_ZERO", "D3D11_BLEND_ONE", "D3D11_BLEND_SRC_COLOR", "D3D11_BLEND_INV_SRC_COLOR", "D3D11_BLEND_SRC_ALPHA", "D3D11_BLEND_INV_SRC_ALPHA", "D3D11_BLEND_DEST_ALPHA", "D3D11_BLEND_INV_DEST_ALPHA", "D3D11_BLEND_DEST_COLOR", "D3D11_BLEND_INV_DEST_COLOR", "D3D11_BLEND_SRC_ALPHA_SAT", "D3D11_BLEND_BLEND_FACTOR", "D3D11_BLEND_INV_BLEND_FACTOR", "D3D11_BLEND_SRC1_COLOR", "D3D11_BLEND_INV_SRC1_COLOR", "D3D11_BLEND_SRC1_ALPHA", "D3D11_BLEND_INV_SRC1_ALPHA", ]) D3D11_BLEND_OP = Enum("D3D11_BLEND_OP", [ "D3D11_BLEND_OP_ADD", "D3D11_BLEND_OP_SUBTRACT", "D3D11_BLEND_OP_REV_SUBTRACT", "D3D11_BLEND_OP_MIN", "D3D11_BLEND_OP_MAX", ]) D3D11_COLOR_WRITE_ENABLE = Enum("D3D11_COLOR_WRITE_ENABLE", [ "D3D11_COLOR_WRITE_ENABLE_ALL", "D3D11_COLOR_WRITE_ENABLE_RED", "D3D11_COLOR_WRITE_ENABLE_GREEN", "D3D11_COLOR_WRITE_ENABLE_BLUE", "D3D11_COLOR_WRITE_ENABLE_ALPHA", ]) D3D11_RENDER_TARGET_BLEND_DESC = Struct("D3D11_RENDER_TARGET_BLEND_DESC", [ (BOOL, "BlendEnable"), (D3D11_BLEND, "SrcBlend"), (D3D11_BLEND, "DestBlend"), (D3D11_BLEND_OP, "BlendOp"), (D3D11_BLEND, "SrcBlendAlpha"), (D3D11_BLEND, "DestBlendAlpha"), (D3D11_BLEND_OP, "BlendOpAlpha"), (UINT8, "RenderTargetWriteMask"), ]) D3D11_BLEND_DESC = Struct("D3D11_BLEND_DESC", [ (BOOL, "AlphaToCoverageEnable"), (BOOL, "IndependentBlendEnable"), (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), "RenderTarget"), ]) ID3D11BlendState.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BLEND_DESC), "pDesc")]), ] D3D11_RASTERIZER_DESC = Struct("D3D11_RASTERIZER_DESC", [ (D3D11_FILL_MODE, "FillMode"), (D3D11_CULL_MODE, "CullMode"), (BOOL, "FrontCounterClockwise"), (INT, "DepthBias"), (FLOAT, "DepthBiasClamp"), (FLOAT, "SlopeScaledDepthBias"), (BOOL, "DepthClipEnable"), (BOOL, "ScissorEnable"), (BOOL, "MultisampleEnable"), (BOOL, "AntialiasedLineEnable"), ]) ID3D11RasterizerState.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RASTERIZER_DESC), "pDesc")]), ] D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [ (OpaquePointer(Const(Void)), "pSysMem"), (UINT, "SysMemPitch"), (UINT, "SysMemSlicePitch"), ]) D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [ (OpaquePointer(Void), "pData"), (UINT, "RowPitch"), (UINT, "DepthPitch"), ]) ID3D11Resource.methods += [ StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")]), StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]), StdMethod(UINT, "GetEvictionPriority", []), ] D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [ (UINT, "ByteWidth"), (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"), (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"), (UINT, "StructureByteStride"), ]) ID3D11Buffer.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")]), ] D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [ (UINT, "Width"), (UINT, "MipLevels"), (UINT, "ArraySize"), (DXGI_FORMAT, "Format"), (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"), (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"), ]) ID3D11Texture1D.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")]), ] D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [ (UINT, "Width"), (UINT, "Height"), (UINT, "MipLevels"), (UINT, "ArraySize"), (DXGI_FORMAT, "Format"), (DXGI_SAMPLE_DESC, "SampleDesc"), (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"), (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"), ]) ID3D11Texture2D.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]), ] D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [ (UINT, "Width"), (UINT, "Height"), (UINT, "Depth"), (UINT, "MipLevels"), (DXGI_FORMAT, "Format"), (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"), (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"), ]) ID3D11Texture3D.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")]), ] D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [ "D3D11_TEXTURECUBE_FACE_POSITIVE_X", "D3D11_TEXTURECUBE_FACE_NEGATIVE_X", "D3D11_TEXTURECUBE_FACE_POSITIVE_Y", "D3D11_TEXTURECUBE_FACE_NEGATIVE_Y", "D3D11_TEXTURECUBE_FACE_POSITIVE_Z", "D3D11_TEXTURECUBE_FACE_NEGATIVE_Z", ]) ID3D11View.methods += [ StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]), ] D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [ (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None), (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None), ]) D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [ "D3D11_BUFFEREX_SRV_FLAG_RAW", ]) D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [ (UINT, "FirstElement"), (UINT, "NumElements"), (D3D11_BUFFEREX_SRV_FLAG, "Flags"), ]) D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [ (UINT, "MostDetailedMip"), (UINT, "MipLevels"), ]) D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [ (UINT, "MostDetailedMip"), (UINT, "MipLevels"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [ (UINT, "MostDetailedMip"), (UINT, "MipLevels"), ]) D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [ (UINT, "MostDetailedMip"), (UINT, "MipLevels"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [ (UINT, "MostDetailedMip"), (UINT, "MipLevels"), ]) D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [ (UINT, "MostDetailedMip"), (UINT, "MipLevels"), ]) D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [ (UINT, "MostDetailedMip"), (UINT, "MipLevels"), (UINT, "First2DArrayFace"), (UINT, "NumCubes"), ]) D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [ (UINT, "UnusedField_NothingToDefine"), ]) D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [ (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [ (DXGI_FORMAT, "Format"), (D3D11_SRV_DIMENSION, "ViewDimension"), (Union(None, [ (D3D11_BUFFER_SRV, "Buffer"), (D3D11_TEX1D_SRV, "Texture1D"), (D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"), (D3D11_TEX2D_SRV, "Texture2D"), (D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"), (D3D11_TEX2DMS_SRV, "Texture2DMS"), (D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"), (D3D11_TEX3D_SRV, "Texture3D"), (D3D11_TEXCUBE_SRV, "TextureCube"), (D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"), (D3D11_BUFFEREX_SRV, "BufferEx"), ]), None), ]) ID3D11ShaderResourceView.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")]), ] D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [ (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None), (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None), ]) D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [ (UINT, "MipSlice"), ]) D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [ (UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [ (UINT, "MipSlice"), ]) D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [ (UINT, "UnusedField_NothingToDefine"), ]) D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [ (UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [ (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [ (UINT, "MipSlice"), (UINT, "FirstWSlice"), (UINT, "WSize"), ]) D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [ (DXGI_FORMAT, "Format"), (D3D11_RTV_DIMENSION, "ViewDimension"), (Union(None, [ (D3D11_BUFFER_RTV, "Buffer"), (D3D11_TEX1D_RTV, "Texture1D"), (D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"), (D3D11_TEX2D_RTV, "Texture2D"), (D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"), (D3D11_TEX2DMS_RTV, "Texture2DMS"), (D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"), (D3D11_TEX3D_RTV, "Texture3D"), ]), None), ]) ID3D11RenderTargetView.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]), ] D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [ (UINT, "MipSlice"), ]) D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [ (UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [ (UINT, "MipSlice"), ]) D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [ (UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [ (UINT, "UnusedField_NothingToDefine"), ]) D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [ (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_DSV_FLAG = Flags(UINT, [ "D3D11_DSV_READ_ONLY_DEPTH", "D3D11_DSV_READ_ONLY_STENCIL", ]) D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [ (DXGI_FORMAT, "Format"), (D3D11_DSV_DIMENSION, "ViewDimension"), (D3D11_DSV_FLAG, "Flags"), (Union(None, [ (D3D11_TEX1D_DSV, "Texture1D"), (D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"), (D3D11_TEX2D_DSV, "Texture2D"), (D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"), (D3D11_TEX2DMS_DSV, "Texture2DMS"), (D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"), ]), None), ]) ID3D11DepthStencilView.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")]), ] D3D11_BUFFER_UAV_FLAG = Flags(UINT, [ "D3D11_BUFFER_UAV_FLAG_RAW", "D3D11_BUFFER_UAV_FLAG_APPEND", "D3D11_BUFFER_UAV_FLAG_COUNTER", ]) D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [ (UINT, "FirstElement"), (UINT, "NumElements"), (D3D11_BUFFER_UAV_FLAG, "Flags"), ]) D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [ (UINT, "MipSlice"), ]) D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [ (UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [ (UINT, "MipSlice"), ]) D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [ (UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize"), ]) D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [ (UINT, "MipSlice"), (UINT, "FirstWSlice"), (UINT, "WSize"), ]) D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [ (DXGI_FORMAT, "Format"), (D3D11_UAV_DIMENSION, "ViewDimension"), (Union(None, [ (D3D11_BUFFER_UAV, "Buffer"), (D3D11_TEX1D_UAV, "Texture1D"), (D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"), (D3D11_TEX2D_UAV, "Texture2D"), (D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"), (D3D11_TEX3D_UAV, "Texture3D"), ]), None), ]) ID3D11UnorderedAccessView.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")]), ] D3D11_FILTER = Enum("D3D11_FILTER", [ "D3D11_FILTER_MIN_MAG_MIP_POINT", "D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR", "D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT", "D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR", "D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT", "D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR", "D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT", "D3D11_FILTER_MIN_MAG_MIP_LINEAR", "D3D11_FILTER_ANISOTROPIC", "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT", "D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR", "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT", "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR", "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT", "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR", "D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT", "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR", "D3D11_FILTER_COMPARISON_ANISOTROPIC", ]) D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", [ "D3D11_FILTER_TYPE_POINT", "D3D11_FILTER_TYPE_LINEAR", ]) D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [ "D3D11_TEXTURE_ADDRESS_WRAP", "D3D11_TEXTURE_ADDRESS_MIRROR", "D3D11_TEXTURE_ADDRESS_CLAMP", "D3D11_TEXTURE_ADDRESS_BORDER", "D3D11_TEXTURE_ADDRESS_MIRROR_ONCE", ]) D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [ (D3D11_FILTER, "Filter"), (D3D11_TEXTURE_ADDRESS_MODE, "AddressU"), (D3D11_TEXTURE_ADDRESS_MODE, "AddressV"), (D3D11_TEXTURE_ADDRESS_MODE, "AddressW"), (FLOAT, "MipLODBias"), (UINT, "MaxAnisotropy"), (D3D11_COMPARISON_FUNC, "ComparisonFunc"), (Array(FLOAT, 4), "BorderColor"), (FLOAT, "MinLOD"), (FLOAT, "MaxLOD"), ]) ID3D11SamplerState.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")]), ] D3D11_FORMAT_SUPPORT = Flags(UINT, [ "D3D11_FORMAT_SUPPORT_BUFFER", "D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER", "D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER", "D3D11_FORMAT_SUPPORT_SO_BUFFER", "D3D11_FORMAT_SUPPORT_TEXTURE1D", "D3D11_FORMAT_SUPPORT_TEXTURE2D", "D3D11_FORMAT_SUPPORT_TEXTURE3D", "D3D11_FORMAT_SUPPORT_TEXTURECUBE", "D3D11_FORMAT_SUPPORT_SHADER_LOAD", "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE", "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON", "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT", "D3D11_FORMAT_SUPPORT_MIP", "D3D11_FORMAT_SUPPORT_MIP_AUTOGEN", "D3D11_FORMAT_SUPPORT_RENDER_TARGET", "D3D11_FORMAT_SUPPORT_BLENDABLE", "D3D11_FORMAT_SUPPORT_DEPTH_STENCIL", "D3D11_FORMAT_SUPPORT_CPU_LOCKABLE", "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE", "D3D11_FORMAT_SUPPORT_DISPLAY", "D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT", "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET", "D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD", "D3D11_FORMAT_SUPPORT_SHADER_GATHER", "D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST", "D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW", "D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON", ]) D3D11_FORMAT_SUPPORT2 = Enum("D3D11_FORMAT_SUPPORT2", [ "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD", "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS", "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE", "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE", "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX", "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX", "D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD", "D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE", ]) ID3D11Asynchronous.methods += [ StdMethod(UINT, "GetDataSize", []), ] D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [ "D3D11_ASYNC_GETDATA_DONOTFLUSH", ]) D3D11_QUERY = Enum("D3D11_QUERY", [ "D3D11_QUERY_EVENT", "D3D11_QUERY_OCCLUSION", "D3D11_QUERY_TIMESTAMP", "D3D11_QUERY_TIMESTAMP_DISJOINT", "D3D11_QUERY_PIPELINE_STATISTICS", "D3D11_QUERY_OCCLUSION_PREDICATE", "D3D11_QUERY_SO_STATISTICS", "D3D11_QUERY_SO_OVERFLOW_PREDICATE", "D3D11_QUERY_SO_STATISTICS_STREAM0", "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0", "D3D11_QUERY_SO_STATISTICS_STREAM1", "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1", "D3D11_QUERY_SO_STATISTICS_STREAM2", "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2", "D3D11_QUERY_SO_STATISTICS_STREAM3", "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3", ]) D3D11_QUERY_MISC_FLAG = Flags(UINT, [ "D3D11_QUERY_MISC_PREDICATEHINT", ]) D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [ (D3D11_QUERY, "Query"), (D3D11_QUERY_MISC_FLAG, "MiscFlags"), ]) ID3D11Query.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [ (UINT64, "Frequency"), (BOOL, "Disjoint"), ]) D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [ (UINT64, "IAVertices"), (UINT64, "IAPrimitives"), (UINT64, "VSInvocations"), (UINT64, "GSInvocations"), (UINT64, "GSPrimitives"), (UINT64, "CInvocations"), (UINT64, "CPrimitives"), (UINT64, "PSInvocations"), (UINT64, "HSInvocations"), (UINT64, "DSInvocations"), (UINT64, "CSInvocations"), ]) D3D11_QUERY_DATA_SO_STATISTICS = Struct("D3D11_QUERY_DATA_SO_STATISTICS", [ (UINT64, "NumPrimitivesWritten"), (UINT64, "PrimitivesStorageNeeded"), ]) D3D11_COUNTER = Enum("D3D11_COUNTER", [ "D3D11_COUNTER_DEVICE_DEPENDENT_0", ]) D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [ "D3D11_COUNTER_TYPE_FLOAT32", "D3D11_COUNTER_TYPE_UINT16", "D3D11_COUNTER_TYPE_UINT32", "D3D11_COUNTER_TYPE_UINT64", ]) D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [ (D3D11_COUNTER, "Counter"), (UINT, "MiscFlags"), ]) D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [ (D3D11_COUNTER, "LastDeviceDependentCounter"), (UINT, "NumSimultaneousCounters"), (UINT8, "NumDetectableParallelUnits"), ]) ID3D11Counter.methods += [ StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")]), ] D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum("D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", [ "D3D11_STANDARD_MULTISAMPLE_PATTERN", "D3D11_CENTER_MULTISAMPLE_PATTERN", ]) D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [ "D3D11_DEVICE_CONTEXT_IMMEDIATE", "D3D11_DEVICE_CONTEXT_DEFERRED", ]) D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [ (UINT, "InstanceId"), (UINT, "InstanceIndex"), (UINT, "TypeId"), (UINT, "ConstantBuffer"), (UINT, "BaseConstantBufferOffset"), (UINT, "BaseTexture"), (UINT, "BaseSampler"), (BOOL, "Created"), ]) ID3D11ClassInstance.methods += [ StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]), StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")]), StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")]), StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")]), ] ID3D11ClassLinkage.methods += [ StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]), StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]), ] ID3D11CommandList.methods += [ StdMethod(UINT, "GetContextFlags", []), ] D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [ (BOOL, "DriverConcurrentCreates"), (BOOL, "DriverCommandLists"), ]) D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [ (BOOL, "DoublePrecisionFloatShaderOps"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [ (DXGI_FORMAT, "InFormat"), (D3D11_FORMAT_SUPPORT, "OutFormatSupport"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [ (DXGI_FORMAT, "InFormat"), (D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"), ]) D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [ (BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"), ]) D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [ ("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)), ("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)), ("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)), ("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)), ("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)), ], Blob(Void, "FeatureSupportDataSize"), False) ID3D11DeviceContext.methods += [ StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]), StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]), StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]), StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]), StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]), StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]), StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]), StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Pointer(Const(UINT)), "pStrides"), (Pointer(Const(UINT)), "pOffsets")]), StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]), StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]), StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]), StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]), StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]), StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]), StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]), StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")]), StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]), StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]), StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]), StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]), StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]), StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Pointer(Const(UINT)), "pOffsets")]), StdMethod(Void, "DrawAuto", []), StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]), StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]), StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]), StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]), StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]), StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]), StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]), StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]), StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]), StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (OpaquePointer(Const(Void)), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]), StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]), StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]), StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]), StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]), StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]), StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]), StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]), StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")]), StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]), StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]), StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]), StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]), StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]), StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]), StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]), StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]), StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]), StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Pointer(UINT), "pStrides"), Out(Pointer(UINT), "pOffsets")]), StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]), StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]), StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")]), StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]), StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), (Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]), StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]), StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]), StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]), StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]), StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]), StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]), StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]), StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]), StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]), StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]), StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]), StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]), StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]), StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]), StdMethod(Void, "ClearState", []), StdMethod(Void, "Flush", []), StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", []), StdMethod(UINT, "GetContextFlags", []), StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]), ] D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [ "D3D11_CREATE_DEVICE_SINGLETHREADED", "D3D11_CREATE_DEVICE_DEBUG", "D3D11_CREATE_DEVICE_SWITCH_TO_REF", "D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS", "D3D11_CREATE_DEVICE_BGRA_SUPPORT", ]) ID3D11Device.methods += [ StdMethod(HRESULT, "CreateBuffer", [(Pointer(Const(D3D11_BUFFER_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Buffer)), "ppBuffer")]), StdMethod(HRESULT, "CreateTexture1D", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture1D)), "ppTexture1D")]), StdMethod(HRESULT, "CreateTexture2D", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture2D)), "ppTexture2D")]), StdMethod(HRESULT, "CreateTexture3D", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture3D)), "ppTexture3D")]), StdMethod(HRESULT, "CreateShaderResourceView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), "ppSRView")]), StdMethod(HRESULT, "CreateUnorderedAccessView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), "ppUAView")]), StdMethod(HRESULT, "CreateRenderTargetView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), "ppRTView")]), StdMethod(HRESULT, "CreateDepthStencilView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]), StdMethod(HRESULT, "CreateInputLayout", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), "NumElements"), "pInputElementDescs"), (UINT, "NumElements"), (Blob(Const(Void), "BytecodeLength"), "pShaderBytecodeWithInputSignature"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]), StdMethod(HRESULT, "CreateVertexShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader")]), StdMethod(HRESULT, "CreateGeometryShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]), StdMethod(HRESULT, "CreateGeometryShaderWithStreamOutput", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), "NumEntries"), "pSODeclaration"), (UINT, "NumEntries"), (Array(Const(UINT), "NumStrides"), "pBufferStrides"), (UINT, "NumStrides"), (UINT, "RasterizedStream"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]), StdMethod(HRESULT, "CreatePixelShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader")]), StdMethod(HRESULT, "CreateHullShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader")]), StdMethod(HRESULT, "CreateDomainShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader")]), StdMethod(HRESULT, "CreateComputeShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader")]), StdMethod(HRESULT, "CreateClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]), StdMethod(HRESULT, "CreateBlendState", [(Pointer(Const(D3D11_BLEND_DESC)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState")]), StdMethod(HRESULT, "CreateDepthStencilState", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), "pDepthStencilDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState")]), StdMethod(HRESULT, "CreateRasterizerState", [(Pointer(Const(D3D11_RASTERIZER_DESC)), "pRasterizerDesc"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]), StdMethod(HRESULT, "CreateSamplerState", [(Pointer(Const(D3D11_SAMPLER_DESC)), "pSamplerDesc"), Out(Pointer(ObjPointer(ID3D11SamplerState)), "ppSamplerState")]), StdMethod(HRESULT, "CreateQuery", [(Pointer(Const(D3D11_QUERY_DESC)), "pQueryDesc"), Out(Pointer(ObjPointer(ID3D11Query)), "ppQuery")]), StdMethod(HRESULT, "CreatePredicate", [(Pointer(Const(D3D11_QUERY_DESC)), "pPredicateDesc"), Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate")]), StdMethod(HRESULT, "CreateCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pCounterDesc"), Out(Pointer(ObjPointer(ID3D11Counter)), "ppCounter")]), StdMethod(HRESULT, "CreateDeferredContext", [(UINT, "ContextFlags"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppDeferredContext")]), StdMethod(HRESULT, "OpenSharedResource", [(HANDLE, "hResource"), (REFIID, "ReturnedInterface"), Out(Pointer(ObjPointer(Void)), "ppResource")]), StdMethod(HRESULT, "CheckFormatSupport", [(DXGI_FORMAT, "Format"), Out(Pointer(D3D11_FORMAT_SUPPORT), "pFormatSupport")]), StdMethod(HRESULT, "CheckMultisampleQualityLevels", [(DXGI_FORMAT, "Format"), (UINT, "SampleCount"), Out(Pointer(UINT), "pNumQualityLevels")]), StdMethod(Void, "CheckCounterInfo", [Out(Pointer(D3D11_COUNTER_INFO), "pCounterInfo")]), StdMethod(HRESULT, "CheckCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pDesc"), Out(Pointer(D3D11_COUNTER_TYPE), "pType"), Out(Pointer(UINT), "pActiveCounters"), Out(LPSTR, "szName"), Out(Pointer(UINT), "pNameLength"), Out(LPSTR, "szUnits"), Out(Pointer(UINT), "pUnitsLength"), Out(LPSTR, "szDescription"), Out(Pointer(UINT), "pDescriptionLength")]), StdMethod(HRESULT, "CheckFeatureSupport", [(D3D11_FEATURE, "Feature"), Out(D3D11_FEATURE_DATA, "pFeatureSupportData"), (UINT, "FeatureSupportDataSize")]), StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]), StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]), StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]), StdMethod(D3D_FEATURE_LEVEL, "GetFeatureLevel", []), StdMethod(D3D11_CREATE_DEVICE_FLAG, "GetCreationFlags", []), StdMethod(HRESULT, "GetDeviceRemovedReason", []), StdMethod(Void, "GetImmediateContext", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]), StdMethod(HRESULT, "SetExceptionMode", [(D3D11_RAISE_FLAG, "RaiseFlags")]), StdMethod(UINT, "GetExceptionMode", []), ] d3d11 = API("d3d11") d3d11.addFunctions([ StdFunction(HRESULT, "D3D11CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]), StdFunction(HRESULT, "D3D11CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]), # XXX: Undocumented functions, called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is set StdFunction(HRESULT, "D3D11CoreRegisterLayers", [LPCVOID, DWORD], internal=True), StdFunction(SIZE_T, "D3D11CoreGetLayeredDeviceSize", [LPCVOID, DWORD], internal=True), StdFunction(HRESULT, "D3D11CoreCreateLayeredDevice", [LPCVOID, DWORD, LPCVOID, (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppvObj")], internal=True), StdFunction(HRESULT, "D3D11CoreCreateDevice", [DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True), ]) d3d11.addInterfaces([ IDXGIAdapter1, IDXGIDevice1, IDXGIResource, ID3D11Debug, ID3D11InfoQueue, ID3D11SwitchToRef, ])
50.016168
596
0.739227
5,939
61,870
7.334063
0.159286
0.037606
0.024336
0.019101
0.440526
0.327731
0.251578
0.217508
0.207177
0.190991
0
0.058657
0.107225
61,870
1,236
597
50.056634
0.729904
0.018846
0
0.214019
0
0
0.419811
0.220379
0
0
0
0
0
1
0
false
0.000935
0.002804
0
0.002804
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a682a5b6be55bf5cb429b4f53cde390f56c0458
1,244
py
Python
day08.py
Pil0u/adventofcode2020
97a6c291fc1653bcb1ea7abd7f38e71e2c0458f8
[ "MIT" ]
null
null
null
day08.py
Pil0u/adventofcode2020
97a6c291fc1653bcb1ea7abd7f38e71e2c0458f8
[ "MIT" ]
null
null
null
day08.py
Pil0u/adventofcode2020
97a6c291fc1653bcb1ea7abd7f38e71e2c0458f8
[ "MIT" ]
null
null
null
from copy import deepcopy def boot(seq): index = 0 played_indices = set() acc = 0 while True: if index == len(seq): return True, acc if index in played_indices: return False, acc played_indices.add(index) line = seq[index].split() op = line[0] value = int(line[1]) if op == 'nop': index += 1 if op == 'acc': acc += value index += 1 if op == 'jmp': index += value def generate_sequences(list_): all_seqs = [] for idx, value in enumerate(list_): if value[:3] == 'nop': seq = deepcopy(list_) seq[idx] = 'jmp' + value[3:] all_seqs.append(seq) if value[:3] == 'jmp': seq = deepcopy(list_) seq[idx] = 'nop' + value[3:] all_seqs.append(seq) return all_seqs def result(input_): # Part 1 part_one = boot(input_)[1] # Part 2 all_sequences = generate_sequences(input_) for sequence in all_sequences: result = boot(sequence) if result[0] is not False: part_two = result[1] break return part_one, part_two
19.4375
46
0.498392
151
1,244
3.960265
0.324503
0.046823
0.025084
0.033445
0.143813
0.073579
0
0
0
0
0
0.019815
0.391479
1,244
63
47
19.746032
0.770145
0.01045
0
0.142857
0
0
0.017101
0
0
0
0
0
0
1
0.071429
false
0
0.02381
0
0.190476
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a685db25a2acacd77798f8f41ad85739a6b001d
3,825
py
Python
train_fcn.py
onlyNata/segModel
7a823e096b3ed7f554a331c5fba39e24c9e0d8bf
[ "MIT" ]
3
2018-07-02T06:15:36.000Z
2019-06-10T06:26:18.000Z
train_fcn.py
onlyNata/segModel
7a823e096b3ed7f554a331c5fba39e24c9e0d8bf
[ "MIT" ]
null
null
null
train_fcn.py
onlyNata/segModel
7a823e096b3ed7f554a331c5fba39e24c9e0d8bf
[ "MIT" ]
1
2018-10-19T08:07:59.000Z
2018-10-19T08:07:59.000Z
# -*- coding: utf-8 -*- """ Created on Tue Jun 26 16:34:21 2018 @author: LiHongWang """ import os import tensorflow as tf from model import fcn_vgg from model import fcn_mobile from model import fcn_resnet_v2 from data import input_data slim = tf.contrib.slim def main(): num_classes=2 tfRecorf_dir= 'D:/dataSet/kitti/road/sub_um_lane_tra66.tfrecord' train_dir = './fm2/' if not os.path.exists(train_dir): os.makedirs(train_dir) with tf.Graph().as_default(): global_step = tf.contrib.framework.get_or_create_global_step() tf.logging.set_verbosity(tf.logging.INFO) with tf.device("/cpu:0"): samples=input_data.get_images_labels(tfRecorf_dir,num_classes,66, crop_size=[224,224], batch_size=4) batch_queue = slim.prefetch_queue.prefetch_queue(samples, capacity=128 ) tra_batch = batch_queue.dequeue() logit,prediction=fcn_mobile.fcn_mobv1(tra_batch['image'],num_classes) # logit,prediction=fcn_vgg.fcn_vgg16(tra_batch['image'],num_classes) # logit,prediction=fcn_resnet_v2.fcn_res101(tra_batch['image'],num_classes) cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=tf.squeeze(tra_batch['label'], squeeze_dims=[3]),name="entropy") loss = tf.reduce_mean(cross_entropy,name='loss') slim.losses.add_loss(loss) total_loss = slim.losses.get_total_loss() # print("image", tra_batch['image']) # print("label", tf.cast(tra_batch['label']*255, tf.uint8)) # print("prediction", tf.cast(prediction*255, tf.uint8)) # Create some summaries to visualize the training process: tf.summary.scalar('losses/Total_Loss', total_loss) tf.summary.image("image", tra_batch['image'], max_outputs=4) tf.summary.image("label", tf.cast(tra_batch['label']*255, tf.uint8), max_outputs=4) tf.summary.image("prediction", tf.cast(prediction*255, tf.uint8), max_outputs=4) lr = tf.train.exponential_decay(0.001, global_step, 10000, 0.8, staircase=True) #lr = tf.constant(0.001, tf.float32) tf.summary.scalar('learning_rate', lr) for variable in slim.get_model_variables(): tf.summary.histogram(variable.op.name, variable) # Specify the optimizer and create the train op: optimizer = tf.train.RMSPropOptimizer(lr,0.9) train_op = slim.learning.create_train_op(total_loss, optimizer) # Run the training: gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7) config=tf.ConfigProto(gpu_options=gpu_options) final_loss = slim.learning.train(train_op, logdir=train_dir, log_every_n_steps=100, save_summaries_secs=20, save_interval_secs=1800, init_fn=None,#fcn_mobile.get_init_fn(), session_config=config, number_of_steps=65000) print('Finished training. Last batch loss %f' % final_loss) if __name__=='__main__': main()
34.151786
99
0.539608
427
3,825
4.576112
0.412178
0.036847
0.033265
0.027636
0.156602
0.144831
0.113613
0.076766
0.0348
0
0
0.038193
0.363399
3,825
112
100
34.151786
0.764271
0.152941
0
0
0
0
0.059769
0.015424
0
0
0
0
0
1
0.017857
false
0
0.107143
0
0.125
0.017857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a69e368874ca389ea7a44e379f62b44b8a60c98
4,411
py
Python
iap/validate_jwt.py
spitfire55/python-docs-samples
b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed
[ "Apache-2.0" ]
4
2018-12-23T18:17:14.000Z
2020-01-05T19:13:58.000Z
iap/validate_jwt.py
spitfire55/python-docs-samples
b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed
[ "Apache-2.0" ]
16
2019-06-15T00:02:56.000Z
2021-03-25T23:22:38.000Z
iap/validate_jwt.py
spitfire55/python-docs-samples
b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed
[ "Apache-2.0" ]
4
2018-06-03T14:43:25.000Z
2019-11-24T04:05:18.000Z
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Sample showing how to validate the Identity-Aware Proxy (IAP) JWT. This code should be used by applications in Google Compute Engine-based environments (such as Google App Engine flexible environment, Google Compute Engine, or Google Container Engine) to provide an extra layer of assurance that a request was authorized by IAP. For applications running in the App Engine standard environment, use App Engine's Users API instead. """ # [START iap_validate_jwt] import jwt import requests def validate_iap_jwt_from_app_engine(iap_jwt, cloud_project_number, cloud_project_id): """Validate a JWT passed to your App Engine app by Identity-Aware Proxy. Args: iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header. cloud_project_number: The project *number* for your Google Cloud project. This is returned by 'gcloud projects describe $PROJECT_ID', or in the Project Info card in Cloud Console. cloud_project_id: The project *ID* for your Google Cloud project. Returns: (user_id, user_email, error_str). """ expected_audience = '/projects/{}/apps/{}'.format( cloud_project_number, cloud_project_id) return _validate_iap_jwt(iap_jwt, expected_audience) def validate_iap_jwt_from_compute_engine(iap_jwt, cloud_project_number, backend_service_id): """Validate an IAP JWT for your (Compute|Container) Engine service. Args: iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header. cloud_project_number: The project *number* for your Google Cloud project. This is returned by 'gcloud projects describe $PROJECT_ID', or in the Project Info card in Cloud Console. backend_service_id: The ID of the backend service used to access the application. See https://cloud.google.com/iap/docs/signed-headers-howto for details on how to get this value. Returns: (user_id, user_email, error_str). """ expected_audience = '/projects/{}/global/backendServices/{}'.format( cloud_project_number, backend_service_id) return _validate_iap_jwt(iap_jwt, expected_audience) def _validate_iap_jwt(iap_jwt, expected_audience): try: key_id = jwt.get_unverified_header(iap_jwt).get('kid') if not key_id: return (None, None, '**ERROR: no key ID**') key = get_iap_key(key_id) decoded_jwt = jwt.decode( iap_jwt, key, algorithms=['ES256'], audience=expected_audience) return (decoded_jwt['sub'], decoded_jwt['email'], '') except (jwt.exceptions.InvalidTokenError, requests.exceptions.RequestException) as e: return (None, None, '**ERROR: JWT validation error {}**'.format(e)) def get_iap_key(key_id): """Retrieves a public key from the list published by Identity-Aware Proxy, re-fetching the key file if necessary. """ key_cache = get_iap_key.key_cache key = key_cache.get(key_id) if not key: # Re-fetch the key file. resp = requests.get( 'https://www.gstatic.com/iap/verify/public_key') if resp.status_code != 200: raise Exception( 'Unable to fetch IAP keys: {} / {} / {}'.format( resp.status_code, resp.headers, resp.text)) key_cache = resp.json() get_iap_key.key_cache = key_cache key = key_cache.get(key_id) if not key: raise Exception('Key {!r} not found'.format(key_id)) return key # Used to cache the Identity-Aware Proxy public keys. This code only # refetches the file when a JWT is signed with a key not present in # this cache. get_iap_key.key_cache = {} # [END iap_validate_jwt]
38.692982
79
0.682838
620
4,411
4.691935
0.316129
0.037126
0.037126
0.020626
0.341354
0.314541
0.24407
0.236507
0.236507
0.236507
0
0.004148
0.234867
4,411
113
80
39.035398
0.857778
0.533893
0
0.139535
0
0
0.119209
0.019781
0
0
0
0
0
1
0.093023
false
0
0.046512
0
0.27907
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8a6b4f25018fb455967003872eafa0810ca93675
1,995
py
Python
examples/calc.py
manatlan/htag
cf085077adf04bec8a2b059497efedb210c59936
[ "MIT" ]
1
2022-03-12T09:42:13.000Z
2022-03-12T09:42:13.000Z
examples/calc.py
manatlan/thag
0c57a103a8dbdbf9e1f09c759f1c35c1c3eff359
[ "MIT" ]
null
null
null
examples/calc.py
manatlan/thag
0c57a103a8dbdbf9e1f09c759f1c35c1c3eff359
[ "MIT" ]
null
null
null
import os,sys; sys.path.insert(0,os.path.dirname(os.path.dirname(__file__))) from htag import Tag """ This example show you how to make a "Calc App" (with physical buttons + keyboard events) There is no work for rendering the layout ;-) Can't be simpler ! """ class Calc(Tag.div): statics=[Tag.H.style(""" .mycalc *,button {font-size:2em;font-family: monospace} """)] def init(self): self.txt="" self.aff = Tag.Div("&nbsp;",_style="border:1px solid black") self["class"]="mycalc" self <= self.aff self <= Tag.button("C", _onclick=self.bind( self.clean) ) self <= [Tag.button(i, _onclick=self.bind( self.press, i) ) for i in "0123456789+-x/."] self <= Tag.button("=", _onclick=self.bind( self.compute ) ) #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ with real keyboard self["onkeyup"] = self.bind( self.presskey, b"event.key" ) def presskey(self,key): if key in "0123456789+-*/.": self.press(key) elif key=="Enter": self.compute() elif key in ["Delete","Backspace"]: self.clean() #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ def press(self,val): self.txt += val self.aff.set( self.txt ) def compute(self): try: self.txt = str(eval(self.txt.replace("x","*"))) self.aff.set( self.txt ) except: self.txt = "" self.aff.set( "Error" ) def clean(self): self.txt="" self.aff.set("&nbsp;") if __name__=="__main__": # import logging # logging.basicConfig(format='[%(levelname)-5s] %(name)s: %(message)s',level=logging.DEBUG) # logging.getLogger("htag.tag").setLevel( logging.INFO ) # and execute it in a pywebview instance from htag.runners import * # here is another runner, in a simple browser (thru ajax calls) BrowserHTTP( Calc ).run() # PyWebWiew( Calc ).run()
28.913043
96
0.543358
246
1,995
4.341463
0.487805
0.052434
0.044944
0.039326
0.08427
0
0
0
0
0
0
0.015852
0.241103
1,995
68
97
29.338235
0.689564
0.214536
0
0.131579
0
0
0.132616
0.019355
0
0
0
0
0
1
0.131579
false
0
0.078947
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0