Dataset schema (column : dtype):

- hexsha: string | size: int64 | ext: string | lang: string
- max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
- max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
- max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
- content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
- qsc_code_num_words_quality_signal: int64 | qsc_code_num_chars_quality_signal: float64 | qsc_code_mean_word_length_quality_signal: float64 | qsc_code_frac_words_unique_quality_signal: float64
- qsc_code_frac_chars_top_2grams_quality_signal: float64 | qsc_code_frac_chars_top_3grams_quality_signal: float64 | qsc_code_frac_chars_top_4grams_quality_signal: float64
- qsc_code_frac_chars_dupe_5grams_quality_signal: float64 | qsc_code_frac_chars_dupe_6grams_quality_signal: float64 | qsc_code_frac_chars_dupe_7grams_quality_signal: float64 | qsc_code_frac_chars_dupe_8grams_quality_signal: float64 | qsc_code_frac_chars_dupe_9grams_quality_signal: float64 | qsc_code_frac_chars_dupe_10grams_quality_signal: float64
- qsc_code_frac_chars_replacement_symbols_quality_signal: float64 | qsc_code_frac_chars_digital_quality_signal: float64 | qsc_code_frac_chars_whitespace_quality_signal: float64
- qsc_code_size_file_byte_quality_signal: float64 | qsc_code_num_lines_quality_signal: float64 | qsc_code_num_chars_line_max_quality_signal: float64 | qsc_code_num_chars_line_mean_quality_signal: float64
- qsc_code_frac_chars_alphabet_quality_signal: float64 | qsc_code_frac_chars_comments_quality_signal: float64 | qsc_code_cate_xml_start_quality_signal: float64 | qsc_code_frac_lines_dupe_lines_quality_signal: float64 | qsc_code_cate_autogen_quality_signal: float64
- qsc_code_frac_lines_long_string_quality_signal: float64 | qsc_code_frac_chars_string_length_quality_signal: float64 | qsc_code_frac_chars_long_word_length_quality_signal: float64 | qsc_code_frac_lines_string_concat_quality_signal: float64
- qsc_code_cate_encoded_data_quality_signal: float64 | qsc_code_frac_chars_hex_words_quality_signal: float64 | qsc_code_frac_lines_prompt_comments_quality_signal: float64 | qsc_code_frac_lines_assert_quality_signal: float64
- qsc_codepython_cate_ast_quality_signal: float64 | qsc_codepython_frac_lines_func_ratio_quality_signal: float64 | qsc_codepython_cate_var_zero_quality_signal: bool | qsc_codepython_frac_lines_pass_quality_signal: float64 | qsc_codepython_frac_lines_import_quality_signal: float64 | qsc_codepython_frac_lines_simplefunc_quality_signal: float64 | qsc_codepython_score_lines_no_logic_quality_signal: float64 | qsc_codepython_frac_lines_print_quality_signal: float64
- qsc_code_num_words: int64 | qsc_code_num_chars: int64 | qsc_code_mean_word_length: int64 | qsc_code_frac_words_unique: null | qsc_code_frac_chars_top_2grams: int64 | qsc_code_frac_chars_top_3grams: int64 | qsc_code_frac_chars_top_4grams: int64
- qsc_code_frac_chars_dupe_5grams: int64 | qsc_code_frac_chars_dupe_6grams: int64 | qsc_code_frac_chars_dupe_7grams: int64 | qsc_code_frac_chars_dupe_8grams: int64 | qsc_code_frac_chars_dupe_9grams: int64 | qsc_code_frac_chars_dupe_10grams: int64
- qsc_code_frac_chars_replacement_symbols: int64 | qsc_code_frac_chars_digital: int64 | qsc_code_frac_chars_whitespace: int64 | qsc_code_size_file_byte: int64 | qsc_code_num_lines: int64 | qsc_code_num_chars_line_max: int64 | qsc_code_num_chars_line_mean: int64
- qsc_code_frac_chars_alphabet: int64 | qsc_code_frac_chars_comments: int64 | qsc_code_cate_xml_start: int64 | qsc_code_frac_lines_dupe_lines: int64 | qsc_code_cate_autogen: int64 | qsc_code_frac_lines_long_string: int64 | qsc_code_frac_chars_string_length: int64 | qsc_code_frac_chars_long_word_length: int64
- qsc_code_frac_lines_string_concat: null | qsc_code_cate_encoded_data: int64 | qsc_code_frac_chars_hex_words: int64 | qsc_code_frac_lines_prompt_comments: int64 | qsc_code_frac_lines_assert: int64
- qsc_codepython_cate_ast: int64 | qsc_codepython_frac_lines_func_ratio: int64 | qsc_codepython_cate_var_zero: int64 | qsc_codepython_frac_lines_pass: int64 | qsc_codepython_frac_lines_import: int64 | qsc_codepython_frac_lines_simplefunc: int64 | qsc_codepython_score_lines_no_logic: int64 | qsc_codepython_frac_lines_print: int64
- effective: string | hits: int64

Each record below repeats these fields in order; the multi-line content cell holds the raw Python source of the sampled file.
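To make the schema concrete, here is a minimal, assumed sketch of filtering records of this shape on a few of the quality signals. The example row and the thresholds are illustrative only and are not taken from the dataset.

```python
# Minimal, illustrative filter over records shaped like the schema above.
# The example row and thresholds are assumptions for demonstration only.
example_row = {
    "lang": "Python",
    "qsc_code_num_lines_quality_signal": 37.0,
    "qsc_code_frac_lines_dupe_lines_quality_signal": 0.0,
    "qsc_code_cate_autogen_quality_signal": 0.0,
}

def keep_row(row: dict) -> bool:
    """Keep Python files that are not trivially short, heavily duplicated, or auto-generated."""
    return (
        row["lang"] == "Python"
        and row["qsc_code_num_lines_quality_signal"] >= 10
        and row["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.3
        and row["qsc_code_cate_autogen_quality_signal"] == 0
    )

print(keep_row(example_row))  # True
```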
hexsha: 6a8b93de9ef615a88be0dad5abda769599f3cf01 | size: 2,886 | ext: py | lang: Python
repo_path: neptune/internal/client_library/job_development_api/image.py | repo_name: jiji-online/neptune-cli | head_hexsha: 50cf680a80d141497f9331ab7cdaee49fcb90b0c | licenses: ["Apache-2.0"] (identical across the stars/issues/forks snapshots)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all star/issue/fork event datetimes: null)
content:
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future import standard_library
standard_library.install_aliases()
# pylint: disable=wrong-import-position
from future.builtins import object
import base64
import io
import PIL.Image
from neptune.generated.swagger_client import InputImage
from neptune.internal.common.models.parameters_validation import (
of_type_validator,
text_conv,
validate
)
class Image(object):
"""
Represents information about images sent to image channels.
"""
@validate(name=text_conv, description=text_conv, data=of_type_validator(PIL.Image.Image))
def __init__(self, name, description, data):
"""
Creates a new Image.
:param name: Name of the image, displayed in the Channels tab on job's dashboard.
:param description: Description of the image displayed in the Channels tab
on job's dashboard.
:param data: Image data.
:type name: unicode
:type description: unicode
:type data: PIL.Image
"""
self._name = name
self._description = description
self._data = data
def to_input_image(self):
"""
Creates InputImage that can be sent to Neptune.
:return: input image in format appropriate to be sent to Neptune.
:rtype: InputImage
"""
image_buffer = io.BytesIO()
self.data.save(image_buffer, format='PNG')
contents = image_buffer.getvalue()
image_buffer.close()
input_image = InputImage()
input_image.name = self.name
input_image.description = self.description
input_image.data = base64.b64encode(contents).decode('utf-8')
return input_image
@property
def name(self):
"""
Gets name of this Image.
:return: The name of this Image.
:rtype: str
"""
return self._name
@property
def description(self):
"""
Gets description of this Image.
:return: The description of this Image.
:rtype: str
"""
return self._description
@property
def data(self):
"""
Gets data of this Image.
:return: The data of this Image.
:rtype: PIL.Image
"""
return self._data
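A hypothetical usage sketch of the wrapper above, assuming the surrounding neptune-cli package and Pillow are installed; the name and description are placeholders.

```python
# Illustrative use of the Image wrapper defined above.
import PIL.Image

pil_img = PIL.Image.new("RGB", (32, 32), color=(255, 0, 0))  # a solid red square
img = Image(name="confusion-matrix", description="example channel image", data=pil_img)
input_image = img.to_input_image()  # PNG-encoded, base64 string ready for the Neptune API
print(input_image.name, len(input_image.data))
```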
| 26.722222
| 93
| 0.647263
| 359
| 2,886
| 5.111421
| 0.389972
| 0.038147
| 0.035967
| 0.027793
| 0.124251
| 0.091553
| 0.091553
| 0.059946
| 0.059946
| 0.059946
| 0
| 0.007597
| 0.27027
| 2,886
| 107
| 94
| 26.971963
| 0.863723
| 0.469508
| 0
| 0.081081
| 0
| 0
| 0.006329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.189189
| 0
| 0.459459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a8e67d20152ca7acb56c8c6199b1999e7f99406 | size: 500 | ext: py | lang: Python
repo_path: pybyte/session.py | repo_name: ms7m/py-byte | head_hexsha: c5872ff5b8536160d8cbd7f88406ed593113e77d | licenses: ["MIT"] (identical across the stars/issues/forks snapshots)
max_stars_count: 4 (stars events 2020-01-26T17:22:05.000Z to 2020-08-15T12:23:31.000Z)
max_issues_count: 3 (issues events 2020-01-27T18:10:06.000Z to 2020-03-31T10:56:03.000Z)
max_forks_count: 2 (forks events 2020-01-27T17:59:45.000Z to 2020-02-01T16:43:53.000Z)
content:
import requests
class ByteSession(object):
def __init__(self, token, providedSession=False):
self._userToken = token
if providedSession == False:
self._session = requests.session()
else:
self._session = providedSession
self._session.headers = {
"Authorization": token,
"User-Agent": "byte/0.2 (co.byte.video; build:145; iOS 13.3.0) Alamofire/4.9.1"
}
def session(self):
return self._session
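A minimal usage sketch of the class above; the token and the URL are placeholders, not values taken from the project.

```python
# Illustrative use of ByteSession; the requests.Session it returns carries the
# Authorization header and byte app User-Agent set in __init__.
session = ByteSession("YOUR_API_TOKEN").session()
response = session.get("https://example.com/endpoint")  # placeholder URL
print(response.status_code, response.headers.get("Content-Type"))
```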
| 26.315789
| 91
| 0.594
| 54
| 500
| 5.333333
| 0.62963
| 0.152778
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 0.296
| 500
| 19
| 92
| 26.315789
| 0.784091
| 0
| 0
| 0
| 0
| 0.071429
| 0.172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0.071429
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a8f6efd4560f4b302cef1f8c07a3c86f509c35d | size: 642 | ext: py | lang: Python
repo_path: debugging/code/multiprocess_main.py | repo_name: awesome-archive/python-debugging-skills | head_hexsha: 69af455302a805d6f198a06ea934f79d5913cb3e | licenses: ["MIT"] (identical across the stars/issues/forks snapshots)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all star/issue/fork event datetimes: null)
content:
# -*- encoding: utf-8 -*-
import multiprocessing as mp
import time
from pudb.remote import set_trace
def worker(worker_id):
""" Simple worker process"""
i = 0
while i < 10:
if worker_id == 1: # debug process with id 1
set_trace(term_size=(80, 24))
time.sleep(1) # represents some work
print('In Process {}, i:{}'.format(worker_id, i))
i = i + 1
if __name__ == '__main__':
processes = []
for p_id in range(2): # 2 worker processes
p = mp.Process(target=worker, args=(p_id,))
p.start()
processes.append(p)
for p in processes:
p.join()
| 22.928571
| 57
| 0.573209
| 91
| 642
| 3.868132
| 0.538462
| 0.068182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030973
| 0.29595
| 642
| 27
| 58
| 23.777778
| 0.747788
| 0.17134
| 0
| 0
| 0
| 0
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a8fb5db5439c528b72c62e081f71f595115b9ad | size: 1,317 | ext: py | lang: Python
repo_path: lldb/packages/Python/lldbsuite/test/expression_command/anonymous-struct/TestCallUserAnonTypedef.py | repo_name: bytesnake/Enzyme | head_hexsha: 247606c279920d476645d2e319e574bf8be10fc9 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks snapshots)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all star/issue/fork event datetimes: null)
content:
"""
Test calling user defined functions using expression evaluation.
This test checks that typesystem lookup works correctly for typedefs of
untagged structures.
Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestExprLookupAnonStructTypedef(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
# Find the breakpoint
self.line = line_number('main.cpp', '// lldb testsuite break')
@expectedFailureAll(oslist=["windows"])
@expectedFailureAll(
oslist=['linux'],
archs=['arm'],
bugnumber="llvm.org/pr27868")
def test(self):
"""Test typedeffed untagged struct arguments for function call expressions"""
self.build()
self.runCmd("file "+self.getBuildArtifact("a.out"),
CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self,
"main.cpp",
self.line,
num_expected_locations=-1,
loc_exact=True
)
self.runCmd("run", RUN_SUCCEEDED)
self.expect("expr multiply(&s)", substrs=['$0 = 1'])
| 28.021277
| 85
| 0.658314
| 148
| 1,317
| 5.695946
| 0.648649
| 0.046263
| 0.060498
| 0.054567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01291
| 0.235383
| 1,317
| 46
| 86
| 28.630435
| 0.82423
| 0.230068
| 0
| 0
| 0
| 0
| 0.105894
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.321429
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a920dacf31156e85a0fcebb52765a1d1ca683fe | size: 2,255 | ext: py | lang: Python
repo_path: authors/apps/notifications/views.py | repo_name: andela/ah-backend-spaces- | head_hexsha: 58e031a96a6b9555f1a4133cf8cb688c236d3f3b | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks snapshots)
max_stars_count: 2 (stars events 2018-08-17T15:47:36.000Z to 2018-09-13T13:58:34.000Z)
max_issues_count: 35 (issues events 2018-07-24T11:42:53.000Z to 2021-06-10T20:34:41.000Z)
max_forks_count: 3 (forks events 2018-07-17T13:05:35.000Z to 2018-09-06T16:03:52.000Z)
content:
from rest_framework import status
from rest_framework.generics import (
RetrieveUpdateAPIView, CreateAPIView,
RetrieveUpdateDestroyAPIView
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from ..authentication.backends import JWTAuthentication
from ..authentication.models import User
from .models import Notifications
from .renderers import (
NotificationsJSONRenderer
)
from .serializers import (
NotificationsAPIViewSerializer, GetNotificationsAPIViewSerializer
)
class NotificationsAPIView(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
renderer_classes = (NotificationsJSONRenderer,)
def put(self, request):
"""
This method is used to update a user's article
"""
serializer_class = NotificationsAPIViewSerializer
notification = request.data.get('notification', {})
user_data = JWTAuthentication().authenticate(request)
# append user_id from token to article variable for later validations in serializers
notification["user_id"] = user_data[1]
serializer = serializer_class(data=notification)
serializer.is_valid(raise_exception=True)
# update the notification statue to True
serializer.update_read_status(serializer.data["notifications"])
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get(self, request):
"""
retrieve all notifications of a user
"""
# decode users authentication token
user_data = JWTAuthentication().authenticate(request)
# get user notifications details from the Notifications table in the database
notifications = Notifications.objects.filter(notification_owner=user_data[1]).values(
"id", "article_id", "notification_title", "notification_body",
"notification_owner", "read_status"
)
# create a list of notifications
# the action below is done by use of list comprehension
list_of_notifications = [i for i in notifications]
return Response({"notifications": list_of_notifications}, status=status.HTTP_200_OK)
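A hypothetical URL configuration for the view above; the route path and name are illustrative, not taken from the project.

```python
# Assumed Django URLconf wiring for NotificationsAPIView.
from django.urls import path

from .views import NotificationsAPIView

urlpatterns = [
    path("notifications/", NotificationsAPIView.as_view(), name="user-notifications"),
]
```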
| 34.692308
| 93
| 0.73082
| 228
| 2,255
| 7.083333
| 0.407895
| 0.024768
| 0.052632
| 0.04582
| 0.054489
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004442
| 0.20133
| 2,255
| 64
| 94
| 35.234375
| 0.892282
| 0.180044
| 0
| 0.054054
| 0
| 0
| 0.067334
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.27027
| 0
| 0.459459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a95707999c6dd8d718ea3549fc898fb5e496ed8 | size: 965 | ext: py | lang: Python
repo_path: python/flexflow/keras/datasets/cifar.py | repo_name: zmxdream/FlexFlow | head_hexsha: 7ea50d71a02e853af7ae573d88c911511b3e82e0 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks snapshots)
max_stars_count: 455 (stars events 2018-12-09T01:57:46.000Z to 2022-03-22T01:56:47.000Z)
max_issues_count: 136 (issues events 2019-04-19T08:24:27.000Z to 2022-03-28T01:39:19.000Z)
max_forks_count: 102 (forks events 2018-12-22T07:38:05.000Z to 2022-03-30T06:04:39.000Z)
content:
# -*- coding: utf-8 -*-
"""Utilities common to CIFAR10 and CIFAR100 datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from six.moves import cPickle
def load_batch(fpath, label_key='labels'):
"""Internal utility for parsing CIFAR data.
# Arguments
fpath: path the file to parse.
label_key: key for label data in the retrieve
dictionary.
# Returns
A tuple `(data, labels)`.
"""
with open(fpath, 'rb') as f:
if sys.version_info < (3,):
d = cPickle.load(f)
else:
d = cPickle.load(f, encoding='bytes')
# decode utf8
d_decoded = {}
for k, v in d.items():
d_decoded[k.decode('utf8')] = v
d = d_decoded
data = d['data']
labels = d[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
return data, labels
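A minimal sketch of calling load_batch on one extracted CIFAR-10 batch file; the path assumes the standard cifar-10-batches-py archive layout and that the archive has already been downloaded and unpacked.

```python
# Illustrative call; the path is an assumption about where the archive was extracted.
data, labels = load_batch("cifar-10-batches-py/data_batch_1")
print(data.shape)   # (10000, 3, 32, 32) for a standard CIFAR-10 training batch
print(labels[:10])  # first ten integer class labels
```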
| 26.081081
| 53
| 0.586528
| 126
| 965
| 4.31746
| 0.547619
| 0.055147
| 0.088235
| 0.047794
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022355
| 0.304663
| 965
| 37
| 54
| 26.081081
| 0.788376
| 0.295337
| 0
| 0
| 0
| 0
| 0.032864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.263158
| 0
| 0.368421
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a96000d55f22510def511958af1f99b01dba806 | size: 523 | ext: py | lang: Python
repo_path: day7/p2.py | repo_name: Seralpa/Advent-of-code | head_hexsha: 9633624e4ff48c50d8be3deac54c83059e9c3b04 | licenses: ["MIT"] (identical across the stars/issues/forks snapshots)
max_stars_count: 1 (stars events 2020-12-18T16:06:25.000Z to 2020-12-18T16:06:25.000Z)
max_issues_count: null | max_forks_count: null (issue/fork event datetimes: null)
content:
def getNumBags(color):
if color=='':
return 0
numBags=1
for bag in rules[color]:
numBags+=bag[1]*getNumBags(bag[0])
return numBags
with open('day7/input.txt') as f:
rules=dict([l.split(' contain') for l in f.read().replace(' bags', '').replace(' bag', '').replace('.', '').replace(' no other', '0 ').splitlines()])
for key in rules:
rules[key]=[(d[2:].strip(), int(d[:2].strip())) for d in rules[key].split(', ')]
print(getNumBags('shiny gold')-1) #-1 cause shiny bag not included
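To see what the one-liner parsing above produces, here is a worked example on a single assumed puzzle line (the line itself is not part of this file).

```python
# Worked example of the rule parsing on one sample line.
line = "light red bags contain 1 bright white bag, 2 muted yellow bags."
cleaned = line.replace(' bags', '').replace(' bag', '').replace('.', '').replace(' no other', '0 ')
key, contents = cleaned.split(' contain')
parsed = [(d[2:].strip(), int(d[:2].strip())) for d in contents.split(', ')]
print(key, parsed)  # light red [('bright white', 1), ('muted yellow', 2)]
```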
| 40.230769
| 153
| 0.596558
| 79
| 523
| 3.949367
| 0.506329
| 0.067308
| 0.044872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02331
| 0.179732
| 523
| 13
| 154
| 40.230769
| 0.703963
| 0.059273
| 0
| 0
| 0
| 0
| 0.111789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a9611abd97c536f926ca250cbefadb44ebcbbc2 | size: 471 | ext: py | lang: Python
repo_path: adv/luther.py | repo_name: 6tennis/dl | head_hexsha: 69eb7e71da9fabe9e7ec40c461b525b4f967f345 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks snapshots)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all star/issue/fork event datetimes: null)
content:
from core.advbase import *
from slot.d import *
def module():
return Luther
class Luther(Adv):
a1 = ('cc',0.10,'hit15')
conf = {}
conf ['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s1
`s2, seq=5 and cancel
`s3, seq=5 and cancel or fsc
`fs, seq=5
"""
coab = ['Blade', 'Xander', 'Tiki']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
| 20.478261
| 44
| 0.552017
| 64
| 471
| 3.875
| 0.703125
| 0.048387
| 0.056452
| 0.104839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035821
| 0.288747
| 471
| 23
| 45
| 20.478261
| 0.704478
| 0
| 0
| 0
| 0
| 0
| 0.336864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0.052632
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a9615693970b5561756763d7533ebc0f325ce0c | size: 21,646 | ext: py | lang: Python
repo_path: wandb/sdk/data_types/image.py | repo_name: macio232/client | head_hexsha: 295380c99b1a0946470672d40348b17a674ad17f | licenses: ["MIT"] (identical across the stars/issues/forks snapshots)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all star/issue/fork event datetimes: null)
content:
import hashlib
from io import BytesIO
import logging
import os
from typing import Any, cast, Dict, List, Optional, Sequence, Type, TYPE_CHECKING, Union
from pkg_resources import parse_version
import wandb
from wandb import util
from ._private import MEDIA_TMP
from .base_types.media import BatchableMedia, Media
from .helper_types.bounding_boxes_2d import BoundingBoxes2D
from .helper_types.classes import Classes
from .helper_types.image_mask import ImageMask
if TYPE_CHECKING: # pragma: no cover
import matplotlib # type: ignore
import numpy as np # type: ignore
import PIL # type: ignore
import torch # type: ignore
from wandb.apis.public import Artifact as PublicArtifact
from ..wandb_artifacts import Artifact as LocalArtifact
from ..wandb_run import Run as LocalRun
ImageDataType = Union[
"matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray"
]
ImageDataOrPathType = Union[str, "Image", ImageDataType]
TorchTensorType = Union["torch.Tensor", "torch.Variable"]
def _server_accepts_image_filenames() -> bool:
# Newer versions of wandb accept large image filenames arrays
# but older versions would have issues with this.
max_cli_version = util._get_max_cli_version()
if max_cli_version is None:
return False
return parse_version("0.12.10") <= parse_version(max_cli_version)
class Image(BatchableMedia):
"""Format images for logging to W&B.
Arguments:
data_or_path: (numpy array, string, io) Accepts numpy array of
image data, or a PIL image. The class attempts to infer
the data format and converts it.
mode: (string) The PIL mode for an image. Most common are "L", "RGB",
"RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes.
caption: (string) Label for display of image.
Examples:
### Create a wandb.Image from a numpy array
<!--yeadoc-test:log-image-numpy->
```python
import numpy as np
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Create a wandb.Image from a PILImage
<!--yeadoc-test:log-image-pil->
```python
import numpy as np
from PIL import Image as PILImage
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
"""
MAX_ITEMS = 108
# PIL limit
MAX_DIMENSION = 65500
_log_type = "image-file"
format: Optional[str]
_grouping: Optional[int]
_caption: Optional[str]
_width: Optional[int]
_height: Optional[int]
_image: Optional["PIL.Image"]
_classes: Optional["Classes"]
_boxes: Optional[Dict[str, "BoundingBoxes2D"]]
_masks: Optional[Dict[str, "ImageMask"]]
def __init__(
self,
data_or_path: "ImageDataOrPathType",
mode: Optional[str] = None,
caption: Optional[str] = None,
grouping: Optional[int] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
super(Image, self).__init__()
# TODO: We should remove grouping, it's a terrible name and I don't
# think anyone uses it.
self._grouping = None
self._caption = None
self._width = None
self._height = None
self._image = None
self._classes = None
self._boxes = None
self._masks = None
# Allows the user to pass an Image object as the first parameter and have a perfect copy,
# only overriding additional metadata passed in. If this pattern is compelling, we can generalize.
if isinstance(data_or_path, Image):
self._initialize_from_wbimage(data_or_path)
elif isinstance(data_or_path, str):
self._initialize_from_path(data_or_path)
else:
self._initialize_from_data(data_or_path, mode)
self._set_initialization_meta(grouping, caption, classes, boxes, masks)
def _set_initialization_meta(
self,
grouping: Optional[int] = None,
caption: Optional[str] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
if grouping is not None:
self._grouping = grouping
if caption is not None:
self._caption = caption
total_classes = {}
if boxes:
if not isinstance(boxes, dict):
raise ValueError('Images "boxes" argument must be a dictionary')
boxes_final: Dict[str, BoundingBoxes2D] = {}
for key in boxes:
box_item = boxes[key]
if isinstance(box_item, BoundingBoxes2D):
boxes_final[key] = box_item
elif isinstance(box_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
boxes_final[key] = BoundingBoxes2D(box_item, key)
total_classes.update(boxes_final[key]._class_labels)
self._boxes = boxes_final
if masks:
if not isinstance(masks, dict):
raise ValueError('Images "masks" argument must be a dictionary')
masks_final: Dict[str, ImageMask] = {}
for key in masks:
mask_item = masks[key]
if isinstance(mask_item, ImageMask):
masks_final[key] = mask_item
elif isinstance(mask_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
masks_final[key] = ImageMask(mask_item, key)
if hasattr(masks_final[key], "_val"):
total_classes.update(masks_final[key]._val["class_labels"])
self._masks = masks_final
if classes is not None:
if isinstance(classes, Classes):
total_classes.update(
{val["id"]: val["name"] for val in classes._class_set}
)
else:
total_classes.update({val["id"]: val["name"] for val in classes})
if len(total_classes.keys()) > 0:
self._classes = Classes(
[
{"id": key, "name": total_classes[key]}
for key in total_classes.keys()
]
)
self._width, self._height = self.image.size # type: ignore
self._free_ram()
def _initialize_from_wbimage(self, wbimage: "Image") -> None:
self._grouping = wbimage._grouping
self._caption = wbimage._caption
self._width = wbimage._width
self._height = wbimage._height
self._image = wbimage._image
self._classes = wbimage._classes
self._path = wbimage._path
self._is_tmp = wbimage._is_tmp
self._extension = wbimage._extension
self._sha256 = wbimage._sha256
self._size = wbimage._size
self.format = wbimage.format
self._artifact_source = wbimage._artifact_source
self._artifact_target = wbimage._artifact_target
# We do not want to implicitly copy boxes or masks, just the image-related data.
# self._boxes = wbimage._boxes
# self._masks = wbimage._masks
def _initialize_from_path(self, path: str) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._set_file(path, is_tmp=False)
self._image = pil_image.open(path)
self._image.load()
ext = os.path.splitext(path)[1][1:]
self.format = ext
def _initialize_from_data(self, data: "ImageDataType", mode: str = None,) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
if util.is_matplotlib_typename(util.get_full_typename(data)):
buf = BytesIO()
util.ensure_matplotlib_figure(data).savefig(buf)
self._image = pil_image.open(buf)
elif isinstance(data, pil_image.Image):
self._image = data
elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
vis_util = util.get_module(
"torchvision.utils", "torchvision is required to render images"
)
if hasattr(data, "requires_grad") and data.requires_grad:
data = data.detach()
data = vis_util.make_grid(data, normalize=True)
self._image = pil_image.fromarray(
data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
)
else:
if hasattr(data, "numpy"): # TF data eager tensors
data = data.numpy()
if data.ndim > 2:
data = data.squeeze() # get rid of trivial dimensions as a convenience
self._image = pil_image.fromarray(
self.to_uint8(data), mode=mode or self.guess_mode(data)
)
tmp_path = os.path.join(MEDIA_TMP.name, str(util.generate_id()) + ".png")
self.format = "png"
self._image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True)
@classmethod
def from_json(
cls: Type["Image"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Image":
classes = None
if json_obj.get("classes") is not None:
classes = source_artifact.get(json_obj["classes"]["path"])
masks = json_obj.get("masks")
_masks: Optional[Dict[str, ImageMask]] = None
if masks:
_masks = {}
for key in masks:
_masks[key] = ImageMask.from_json(masks[key], source_artifact)
_masks[key]._set_artifact_source(source_artifact)
_masks[key]._key = key
boxes = json_obj.get("boxes")
_boxes: Optional[Dict[str, BoundingBoxes2D]] = None
if boxes:
_boxes = {}
for key in boxes:
_boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
_boxes[key]._key = key
return cls(
source_artifact.get_path(json_obj["path"]).download(),
caption=json_obj.get("caption"),
grouping=json_obj.get("grouping"),
classes=classes,
boxes=_boxes,
masks=_masks,
)
@classmethod
def get_media_subdir(cls: Type["Image"]) -> str:
return os.path.join("media", "images")
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
ignore_copy_err: Optional[bool] = None,
) -> None:
super().bind_to_run(run, key, step, id_, ignore_copy_err=ignore_copy_err)
if self._boxes is not None:
for i, k in enumerate(self._boxes):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._boxes[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
if self._masks is not None:
for i, k in enumerate(self._masks):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._masks[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Image, self).to_json(run_or_artifact)
json_dict["_type"] = Image._log_type
json_dict["format"] = self.format
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
if self._grouping:
json_dict["grouping"] = self._grouping
if self._caption:
json_dict["caption"] = self._caption
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
artifact = run_or_artifact
if (
self._masks is not None or self._boxes is not None
) and self._classes is None:
raise ValueError(
"classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
)
if self._classes is not None:
class_id = hashlib.md5(
str(self._classes._class_set).encode("utf-8")
).hexdigest()
class_name = os.path.join("media", "classes", class_id + "_cls",)
classes_entry = artifact.add(self._classes, class_name)
json_dict["classes"] = {
"type": "classes-file",
"path": classes_entry.path,
"digest": classes_entry.digest,
}
elif not isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
if self._boxes:
json_dict["boxes"] = {
k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
}
if self._masks:
json_dict["masks"] = {
k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
}
return json_dict
def guess_mode(self, data: "np.ndarray") -> str:
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)
)
@classmethod
def to_uint8(cls, data: "np.ndarray") -> "np.ndarray":
"""
Converts floating point image on the range [0,1] and integer images
on the range [0,255] to uint8, clipping if necessary.
"""
np = util.get_module(
"numpy",
required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
)
# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255
# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
data = (data * 255).astype(np.int32)
# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
@classmethod
def seq_to_json(
cls: Type["Image"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""
Combines a list of images into a meta dictionary object describing the child images.
"""
if TYPE_CHECKING:
seq = cast(Sequence["Image"], seq)
jsons = [obj.to_json(run) for obj in seq]
media_dir = cls.get_media_subdir()
for obj in jsons:
expected = util.to_forward_slash_path(media_dir)
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Image's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
num_images_to_log = len(seq)
width, height = seq[0].image.size # type: ignore
format = jsons[0]["format"]
def size_equals_image(image: "Image") -> bool:
img_width, img_height = image.image.size # type: ignore
return img_width == width and img_height == height # type: ignore
sizes_match = all(size_equals_image(img) for img in seq)
if not sizes_match:
logging.warning(
"Image sizes do not match. This will cause images to be displayed incorrectly in the UI."
)
meta = {
"_type": "images/separated",
"width": width,
"height": height,
"format": format,
"count": num_images_to_log,
}
if _server_accepts_image_filenames():
meta["filenames"] = [obj["path"] for obj in jsons]
else:
wandb.termwarn(
"Unable to log image array filenames. In some cases, this can prevent images from being"
"viewed in the UI. Please upgrade your wandb server",
repeat=False,
)
captions = Image.all_captions(seq)
if captions:
meta["captions"] = captions
all_masks = Image.all_masks(seq, run, key, step)
if all_masks:
meta["all_masks"] = all_masks
all_boxes = Image.all_boxes(seq, run, key, step)
if all_boxes:
meta["all_boxes"] = all_boxes
return meta
@classmethod
def all_masks(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_mask_groups: List[Optional[dict]] = []
for image in images:
if image._masks:
mask_group = {}
for k in image._masks:
mask = image._masks[k]
mask_group[k] = mask.to_json(run)
all_mask_groups.append(mask_group)
else:
all_mask_groups.append(None)
if all_mask_groups and not all(x is None for x in all_mask_groups):
return all_mask_groups
else:
return False
@classmethod
def all_boxes(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_box_groups: List[Optional[dict]] = []
for image in images:
if image._boxes:
box_group = {}
for k in image._boxes:
box = image._boxes[k]
box_group[k] = box.to_json(run)
all_box_groups.append(box_group)
else:
all_box_groups.append(None)
if all_box_groups and not all(x is None for x in all_box_groups):
return all_box_groups
else:
return False
@classmethod
def all_captions(
cls: Type["Image"], images: Sequence["Media"]
) -> Union[bool, Sequence[Optional[str]]]:
return cls.captions(images)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Image):
return False
else:
self_image = self.image
other_image = other.image
if self_image is not None:
self_image = list(self_image.getdata())
if other_image is not None:
other_image = list(other_image.getdata())
return (
self._grouping == other._grouping
and self._caption == other._caption
and self._width == other._width
and self._height == other._height
and self_image == other_image
and self._classes == other._classes
)
def to_data_array(self) -> List[Any]:
res = []
if self.image is not None:
data = list(self.image.getdata())
for i in range(self.image.height):
res.append(data[i * self.image.width : (i + 1) * self.image.width])
self._free_ram()
return res
def _free_ram(self) -> None:
if self._path is not None:
self._image = None
@property
def image(self) -> Optional["PIL.Image"]:
if self._image is None:
if self._path is not None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._image = pil_image.open(self._path)
self._image.load()
return self._image
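A short, assumed usage sketch of two helpers defined above; it presumes the installed wandb package exposes this same Image class and that numpy is available. The array values are arbitrary.

```python
# Illustrative use of wandb.Image and its to_uint8 helper.
import numpy as np
import wandb

float_img = np.random.rand(64, 64, 3)       # floats in [0, 1]
as_uint8 = wandb.Image.to_uint8(float_img)  # rescaled to [0, 255] and cast to uint8
print(as_uint8.dtype, as_uint8.max())

img = wandb.Image(float_img, caption="random noise")
print(img.image.size)                       # (64, 64) after conversion through PIL
```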
| 36.688136
| 119
| 0.572438
| 2,602
| 21,646
| 4.567256
| 0.154112
| 0.01969
| 0.013632
| 0.007152
| 0.241669
| 0.186974
| 0.171491
| 0.153315
| 0.146415
| 0.141535
| 0
| 0.007541
| 0.326111
| 21,646
| 589
| 120
| 36.750424
| 0.807157
| 0.130832
| 0
| 0.205357
| 0
| 0
| 0.09162
| 0.002533
| 0
| 0
| 0
| 0.003396
| 0
| 1
| 0.049107
| false
| 0.002232
| 0.044643
| 0.006696
| 0.169643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a961a7708b268c3d81ea73ab8b93515bd578d6c | size: 669 | ext: py | lang: Python
repo_path: src/ACC_Backend_Utils.py | repo_name: skostic14/isda-racing-backend | head_hexsha: 41b5f9760dc17a29aa8ab5e4cc1894a27496a72c | licenses: ["Apache-2.0"] (identical across the stars/issues/forks snapshots)
max_stars_count: 1 (stars events 2021-07-29T05:29:06.000Z to 2021-07-29T05:29:06.000Z)
max_issues_count: null | max_forks_count: null (issue/fork event datetimes: null)
content:
import datetime
# Gets time from milliseconds
# Returns string formatted as HH:MM:SS:mmm, MM:SS:mmm or S:mmm, depending on the time.
def get_time_from_milliseconds(milli):
milliseconds = milli % 1000
seconds= (milli//1000)%60
minutes= (milli//(1000*60))%60
hours= (milli//(1000*60*60))%24
if hours == 0:
if minutes == 0:
return '%d.%03d' % (seconds, milliseconds)
return '%02d:%02d.%03d' % (minutes, seconds, milliseconds)
return '%02d:%02d:%02d.%03d' % (hours, minutes, seconds, milliseconds)
# Returns a string formatted as YYYY-MM-DD
def get_date_today():
return datetime.date.today().strftime("%Y-%m-%d")
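A quick check of the formatting helper above with a few illustrative inputs; the expected outputs follow directly from the arithmetic in the function.

```python
# Sample calls to the helpers defined above.
print(get_time_from_milliseconds(3750))     # '3.750'        (under a minute)
print(get_time_from_milliseconds(83025))    # '01:23.025'    (under an hour)
print(get_time_from_milliseconds(3723456))  # '01:02:03.456' (an hour or more)
print(get_date_today())                     # e.g. '2024-01-31'
```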
| 35.210526
| 87
| 0.651719
| 95
| 669
| 4.536842
| 0.442105
| 0.083527
| 0.076566
| 0.060325
| 0.143852
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085343
| 0.19432
| 669
| 19
| 88
| 35.210526
| 0.714286
| 0.230194
| 0
| 0
| 0
| 0
| 0.09375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0.076923
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a961b6b72524b941aa7777c8c1e4c9ea87f76f0 | size: 2,721 | ext: py | lang: Python
repo_path: examples/advanced/pidigits.py | repo_name: ovolve/sympy | head_hexsha: 0a15782f20505673466b940454b33b8014a25c13 | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks snapshots)
max_stars_count: 3 (stars events 2015-01-17T23:15:04.000Z to 2015-05-26T14:11:44.000Z)
max_issues_count: 7 (issues events 2015-03-23T23:33:02.000Z to 2019-02-09T00:19:41.000Z)
max_forks_count: 1 (forks events 2019-10-18T12:39:41.000Z to 2019-10-18T12:39:41.000Z)
content:
#!/usr/bin/env python
"""Pi digits example
Example shows arbitrary precision using mpmath with the
computation of the digits of pi.
"""
from mpmath import libmp, pi
from mpmath import functions as mpf_funs
import math
from time import clock
import sys
def display_fraction(digits, skip=0, colwidth=10, columns=5):
"""Pretty printer for first n digits of a fraction"""
perline = colwidth * columns
printed = 0
for linecount in range((len(digits) - skip) // (colwidth * columns)):
line = digits[skip + linecount*perline:skip + (linecount + 1)*perline]
for i in range(columns):
print(line[i*colwidth: (i + 1)*colwidth],)
print(":", (linecount + 1)*perline)
if (linecount + 1) % 10 == 0:
print
printed += colwidth*columns
rem = (len(digits) - skip) % (colwidth * columns)
if rem:
buf = digits[-rem:]
s = ""
for i in range(columns):
s += buf[:colwidth].ljust(colwidth + 1, " ")
buf = buf[colwidth:]
print(s + ":", printed + colwidth*columns)
def calculateit(func, base, n, tofile):
"""Writes first n base-digits of a mpmath function to file"""
prec = 100
intpart = libmp.numeral(3, base)
if intpart == 0:
skip = 0
else:
skip = len(intpart)
print("Step 1 of 2: calculating binary value...")
prec = int(n*math.log(base, 2)) + 10
t = clock()
a = func(prec)
step1_time = clock() - t
print("Step 2 of 2: converting to specified base...")
t = clock()
d = libmp.bin_to_radix(a.man, -a.exp, base, n)
d = libmp.numeral(d, base, n)
step2_time = clock() - t
print("\nWriting output...\n")
if tofile:
out_ = sys.stdout
sys.stdout = tofile
print("%i base-%i digits of pi:\n" % (n, base))
print(intpart, ".\n")
display_fraction(d, skip, colwidth=10, columns=5)
if tofile:
sys.stdout = out_
print("\nFinished in %f seconds (%f calc, %f convert)" % \
((step1_time + step2_time), step1_time, step2_time))
def interactive():
"""Simple function to interact with user"""
print("Compute digits of pi with SymPy\n")
base = input("Which base? (2-36, 10 for decimal) \n> ")
digits = input("How many digits? (enter a big number, say, 10000)\n> ")
tofile = raw_input("Output to file? (enter a filename, or just press enter\nto print directly to the screen) \n> ")
if tofile:
tofile = open(tofile, "w")
calculateit(pi, base, digits, tofile)
def main():
"""A non-interactive runner"""
base = 16
digits = 500
tofile = None
calculateit(pi, base, digits, tofile)
if __name__ == "__main__":
interactive()
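This example still carries Python 2 leftovers: time.clock was removed in Python 3.8, raw_input no longer exists, and input() returns a string, so the numeric answers need an explicit cast. A minimal, assumed modernization of those calls (not part of the original file) could look like this:

```python
# Assumed Python 3 replacements for the legacy calls used above.
from time import perf_counter as clock  # stands in for the removed time.clock

# raw_input() was renamed input() in Python 3; numeric answers need an explicit cast.
base = int(input("Which base? (2-36, 10 for decimal) \n> "))
digits = int(input("How many digits? (enter a big number, say, 10000)\n> "))
tofile = input("Output to file? (enter a filename, or just press enter\n"
               "to print directly to the screen) \n> ")
```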
| 30.233333
| 119
| 0.602352
| 374
| 2,721
| 4.323529
| 0.347594
| 0.024737
| 0.018553
| 0.022263
| 0.092764
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024863
| 0.260933
| 2,721
| 89
| 120
| 30.573034
| 0.779214
| 0.108416
| 0
| 0.134328
| 0
| 0.014925
| 0.170905
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0
| 0.074627
| 0
| 0.134328
| 0.208955
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a97a004a7c418b0d32aaf5764a1c6b24a50f26a | size: 10,580 | ext: py | lang: Python
repo_path: tempest/hacking/checks.py | repo_name: rishabh20111990/tempest | head_hexsha: df15531cd4231000b0da016f5cd8641523ce984e | licenses: ["Apache-2.0"] (identical across the stars/issues/forks snapshots)
max_stars_count: 2 (stars events 2015-08-13T00:07:49.000Z to 2020-08-07T06:38:44.000Z)
max_issues_count: null (issue event datetimes: null)
max_forks_count: 3 (forks events 2016-08-30T06:53:54.000Z to 2021-03-22T16:54:39.000Z)
content:
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from hacking import core
import pycodestyle
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'ironic', 'heat', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
METHOD = re.compile(r"^ def .+")
METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+")
METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+")
CLASS = re.compile(r"^class .+")
EX_ATTRIBUTE = re.compile(r'(\s+|\()(e|ex|exc|exception).message(\s+|\))')
NEGATIVE_TEST_DECORATOR = re.compile(
r'\s*@decorators\.attr\(type=.*negative.*\)')
_HAVE_NEGATIVE_DECORATOR = False
@core.flake8ext
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
"""Check for client imports from tempest/api & tempest/scenario tests
T102: Cannot import OpenStack python clients
"""
if "tempest/api" in filename or "tempest/scenario" in filename:
res = PYTHON_CLIENT_RE.match(physical_line)
if res:
return (physical_line.find(res.group(1)),
("T102: python clients import not allowed"
" in tempest/api/* or tempest/scenario/* tests"))
@core.flake8ext
def scenario_tests_need_service_tags(physical_line, filename,
previous_logical):
"""Check that scenario tests have service tags
T104: Scenario tests require a services decorator
"""
if 'tempest/scenario/' in filename and '/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
"T104: Scenario tests require a service decorator")
@core.flake8ext
def no_setup_teardown_class_for_tests(physical_line, filename):
if pycodestyle.noqa(physical_line):
return
if 'tempest/test.py' in filename or 'tempest/lib/' in filename:
return
if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
"T105: (setUp|tearDown)Class can not be used in tests")
@core.flake8ext
def service_tags_not_in_module_path(physical_line, filename):
"""Check that a service tag isn't in the module path
A service tag should only be added if the service name isn't already in
the module path.
T107
"""
# NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
# created for services like heat which would cause false negatives for
# those tests, so just exclude the scenario tests.
if 'tempest/scenario' not in filename:
matches = SCENARIO_DECORATOR.match(physical_line)
if matches:
services = matches.group(1).split(',')
for service in services:
service_name = service.strip().strip("'")
modulepath = os.path.split(filename)[0]
if service_name in modulepath:
return (physical_line.find(service_name),
"T107: service tag should not be in path")
@core.flake8ext
def no_hyphen_at_end_of_rand_name(logical_line, filename):
"""Check no hyphen at the end of rand_name() argument
T108
"""
msg = "T108: hyphen should not be specified at the end of rand_name()"
if RAND_NAME_HYPHEN_RE.match(logical_line):
return 0, msg
@core.flake8ext
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
N322: Method's default argument shouldn't be mutable
"""
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
@core.flake8ext
def no_testtools_skip_decorator(logical_line):
"""Check that methods do not have the testtools.skip decorator
T109
"""
if TESTTOOLS_SKIP_DECORATOR.match(logical_line):
yield (0, "T109: Cannot use testtools.skip decorator; instead use "
"decorators.skip_because from tempest.lib")
def _common_service_clients_check(logical_line, physical_line, filename,
ignored_list_file=None):
if not re.match('tempest/(lib/)?services/.*', filename):
return False
if ignored_list_file is not None:
ignored_list = []
with open('tempest/hacking/' + ignored_list_file) as f:
for line in f:
ignored_list.append(line.strip())
if filename in ignored_list:
return False
if not METHOD.match(physical_line):
return False
if pycodestyle.noqa(physical_line):
return False
return True
@core.flake8ext
def get_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of GET should be consistent
T110
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T110.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.get(' not in line and ('self.show_resource(' not in line and
'self.list_resources(' not in line):
continue
if METHOD_GET_RESOURCE.match(logical_line):
return
msg = ("T110: [GET /resources] methods should be list_<resource name>s"
" or show_<resource name>")
yield (0, msg)
@core.flake8ext
def delete_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of DELETE should be consistent
T111
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T111.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.delete(' not in line and 'self.delete_resource(' not in line:
continue
if METHOD_DELETE_RESOURCE.match(logical_line):
return
msg = ("T111: [DELETE /resources/<id>] methods should be "
"delete_<resource name>")
yield (0, msg)
@core.flake8ext
def dont_import_local_tempest_into_lib(logical_line, filename):
"""Check that tempest.lib should not import local tempest code
T112
"""
if 'tempest/lib/' not in filename:
return
if not ('from tempest' in logical_line or
'import tempest' in logical_line):
return
if ('from tempest.lib' in logical_line or
'import tempest.lib' in logical_line):
return
msg = ("T112: tempest.lib should not import local tempest code to avoid "
"circular dependency")
yield (0, msg)
@core.flake8ext
def use_rand_uuid_instead_of_uuid4(logical_line, filename):
"""Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()
T113
"""
if 'tempest/lib/' in filename:
return
if 'uuid.uuid4()' not in logical_line:
return
msg = ("T113: Tests should use data_utils.rand_uuid()/rand_uuid_hex() "
"instead of uuid.uuid4()/uuid.uuid4().hex")
yield (0, msg)
@core.flake8ext
def dont_use_config_in_tempest_lib(logical_line, filename):
"""Check that tempest.lib doesn't use tempest config
T114
"""
if 'tempest/lib/' not in filename:
return
if ('tempest.config' in logical_line or
'from tempest import config' in logical_line or
'oslo_config' in logical_line):
msg = ('T114: tempest.lib can not have any dependency on tempest '
'config.')
yield(0, msg)
@core.flake8ext
def dont_put_admin_tests_on_nonadmin_path(logical_line,
filename):
"""Check admin tests should exist under admin path
T115
"""
if 'tempest/api/' not in filename:
return
if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
return
if not re.match(r'.\/tempest\/api\/.*\/admin\/.*', filename):
msg = 'T115: All admin tests should exist under admin path.'
yield(0, msg)
@core.flake8ext
def unsupported_exception_attribute_PY3(logical_line):
"""Check Unsupported 'message' exception attribute in PY3
T116
"""
result = EX_ATTRIBUTE.search(logical_line)
msg = ("[T116] Unsupported 'message' Exception attribute in PY3")
if result:
yield(0, msg)
@core.flake8ext
def negative_test_attribute_always_applied_to_negative_tests(physical_line,
filename):
"""Check ``@decorators.attr(type=['negative'])`` applied to negative tests.
T117
"""
global _HAVE_NEGATIVE_DECORATOR
if re.match(r'.\/tempest\/api\/.*_negative.*', filename):
if NEGATIVE_TEST_DECORATOR.match(physical_line):
_HAVE_NEGATIVE_DECORATOR = True
return
if TEST_DEFINITION.match(physical_line):
if not _HAVE_NEGATIVE_DECORATOR:
return (
0, "T117: Must apply `@decorators.attr(type=['negative'])`"
" to all negative API tests"
)
_HAVE_NEGATIVE_DECORATOR = False
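As a quick sanity check, the two regexes below (copied from the module above) do flag the kinds of lines the N322 and T108 checks describe; the sample lines are assumed examples, not code taken from tempest.

```python
# Demonstration that the patterns above match their intended targets.
import re

mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")

print(bool(mutable_default_args.match("def make_server(name, tags=[]):")))  # True -> N322
print(bool(RAND_NAME_HYPHEN_RE.match("name = rand_name('test-server-')")))  # True -> T108
```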
| 31.963746
| 79
| 0.636578
| 1,338
| 10,580
| 4.85426
| 0.189088
| 0.047421
| 0.036952
| 0.015242
| 0.36736
| 0.288068
| 0.182448
| 0.162125
| 0.105158
| 0.091917
| 0
| 0.017672
| 0.261909
| 10,580
| 330
| 80
| 32.060606
| 0.814061
| 0.180246
| 0
| 0.356757
| 0
| 0
| 0.226009
| 0.061295
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086486
| false
| 0
| 0.064865
| 0
| 0.302703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a98547230e4cc83fa248137ca0fde09ebb67dcf | size: 1,018 | ext: py | lang: Python
repo_path: data/train/python/6a98547230e4cc83fa248137ca0fde09ebb67dcfController.py (identical across the stars/issues/forks snapshots)
max_stars: repo harshp8l/deep-learning-lang-detection | head 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | licenses ["MIT"] | count 84 (stars events 2017-10-25T15:49:21.000Z to 2021-11-28T21:25:54.000Z)
max_issues: repo vassalos/deep-learning-lang-detection | head cbb00b3e81bed3a64553f9c6aa6138b2511e544e | licenses ["MIT"] | count 5 (issues events 2018-03-29T11:50:46.000Z to 2021-04-26T13:33:18.000Z)
max_forks: repo vassalos/deep-learning-lang-detection | head cbb00b3e81bed3a64553f9c6aa6138b2511e544e | licenses ["MIT"] | count 24 (forks events 2017-11-22T08:31:00.000Z to 2022-03-27T01:22:31.000Z)
content:
import SimpleXMLRPCServer
import sys
import logging
from K8055Controller import K8055Controller
logging.basicConfig()
controller_log = logging.getLogger("Controller")
class Controller:
def __init__(self):
self.k8055 = K8055Controller()
controller_log.debug("initialized")
def reset(self):
self.k8055.reset()
controller_log.debug("reset")
return 0
def turn_on(self, i):
self.k8055.turn_on(i)
controller_log.debug('turned on %i' % (i))
return 0
def turn_off(self, i):
self.k8055.turn_off(i)
controller_log.debug('turned off %i' % (i))
return 0
def set_analog(self, i, level):
if (i == 1):
self.k8055.set_analog1(level)
else:
self.k8055.set_analog2(level)
return 0
controller = Controller()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("d6349.mysql.zone.ee", 7000))
server.register_instance(controller)
server.serve_forever()
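For completeness, a hypothetical XML-RPC client for the server above might look like the following; the host and port mirror the address hard-coded in the script, and the method names come from the registered Controller instance.

```python
# Hypothetical client for the Controller service registered above.
import xmlrpc.client  # Python 3 counterpart of the xmlrpclib/SimpleXMLRPCServer modules

proxy = xmlrpc.client.ServerProxy("http://d6349.mysql.zone.ee:7000")
proxy.reset()             # returns 0 on success, as defined by Controller.reset
proxy.turn_on(1)          # switch digital output 1 on
proxy.set_analog(1, 128)  # set analog channel 1 to a mid-range level
```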
| 24.829268
| 77
| 0.634578
| 118
| 1,018
| 5.322034
| 0.364407
| 0.085987
| 0.11465
| 0.044586
| 0.175159
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067639
| 0.259332
| 1,018
| 41
| 78
| 24.829268
| 0.765252
| 0
| 0
| 0.125
| 0
| 0
| 0.068695
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0
| 0.125
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6a9a4141ccd8a77a2a296371f9b8eb6510494db4 | size: 1,487 | ext: py | lang: Python
repo_path: tokendito/tool.py | repo_name: pcmxgti/tokendito | head_hexsha: c1672917b1b95e463c5bdf8e9c3c039189da8e42 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks snapshots)
max_stars_count: 40 (stars events 2019-07-31T03:21:03.000Z to 2022-03-29T23:57:19.000Z)
max_issues_count: 27 (issues events 2019-08-07T06:40:15.000Z to 2022-03-21T18:46:49.000Z)
max_forks_count: 16 (forks events 2019-07-31T14:22:04.000Z to 2022-02-16T12:55:27.000Z)
content:
# vim: set filetype=python ts=4 sw=4
# -*- coding: utf-8 -*-
"""This module retrieves AWS credentials after authenticating with Okta."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from future import standard_library
from tokendito import aws_helpers
from tokendito import helpers
from tokendito import okta_helpers
from tokendito import settings
standard_library.install_aliases()
def cli(args):
"""Tokendito retrieves AWS credentials after authenticating with Okta."""
# Set some required initial values
args = helpers.setup(args)
logging.debug("tokendito retrieves AWS credentials after authenticating with Okta.")
# Collect and organize user specific information
helpers.process_options(args)
# Authenticate with Okta, then use AssumeRole to obtain AWS credentials for the selected role
logging.debug("Authenticate user with Okta and AWS.")
secret_session_token = okta_helpers.authenticate_user(
settings.okta_org, settings.okta_username, settings.okta_password
)
saml_response_string, saml_xml = aws_helpers.authenticate_to_roles(
secret_session_token, settings.okta_aws_app_url
)
assume_role_response, role_name = aws_helpers.select_assumeable_role(
saml_response_string, saml_xml
)
aws_helpers.ensure_keys_work(assume_role_response)
helpers.set_local_credentials(
assume_role_response, role_name, settings.aws_region, settings.aws_output
)
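A hedged sketch of wiring cli() into a command-line entry point (not taken from the package; passing the raw argument list through is an assumption about what helpers.setup() accepts):
import sys
from tokendito import tool

if __name__ == "__main__":
    # Hypothetical: hand the raw arguments to cli(); helpers.setup() is assumed to parse them.
    tool.cli(sys.argv[1:])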
| 31.638298
| 88
| 0.774042
| 190
| 1,487
| 5.784211
| 0.442105
| 0.029117
| 0.069154
| 0.076433
| 0.263876
| 0.216561
| 0.216561
| 0.10737
| 0
| 0
| 0
| 0.002414
| 0.164089
| 1,487
| 46
| 89
| 32.326087
| 0.881738
| 0.228648
| 0
| 0
| 0
| 0
| 0.090989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0.038462
| 0.269231
| 0
| 0.307692
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9c07074d315021100d6322a18c6bc3087be1db
| 15,833
|
py
|
Python
|
ir_datasets/formats/trec.py
|
cakiki/ir_datasets
|
7f9f8e9ff62e49d40383220ecc2daa250695d267
|
[
"Apache-2.0"
] | null | null | null |
ir_datasets/formats/trec.py
|
cakiki/ir_datasets
|
7f9f8e9ff62e49d40383220ecc2daa250695d267
|
[
"Apache-2.0"
] | null | null | null |
ir_datasets/formats/trec.py
|
cakiki/ir_datasets
|
7f9f8e9ff62e49d40383220ecc2daa250695d267
|
[
"Apache-2.0"
] | null | null | null |
import io
import codecs
import tarfile
import re
import gzip
import xml.etree.ElementTree as ET
from fnmatch import fnmatch
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from .base import GenericDoc, GenericQuery, GenericScoredDoc, BaseDocs, BaseQueries, BaseScoredDocs, BaseQrels
class TrecDoc(NamedTuple):
doc_id: str
text: str
marked_up_doc: str
class TitleUrlTextDoc(NamedTuple):
doc_id: str
title: str
url: str
text: str
class TrecQuery(NamedTuple):
query_id: str
title: str
description: str
narrative: str
class TrecSubtopic(NamedTuple):
number: str
text: str
type: str
class TrecQrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
iteration: str
class TrecPrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
method: int
iprob: float
# Default content tags from Anserini's TrecCollection
CONTENT_TAGS = 'TEXT HEADLINE TITLE HL HEAD TTL DD DATE LP LEADPARA'.split()
class TrecDocs(BaseDocs):
def __init__(self, docs_dlc, encoding=None, path_globs=None, content_tags=CONTENT_TAGS, parser='BS4', namespace=None, lang=None, expected_file_count=None, docstore_size_hint=None, count_hint=None):
self._docs_dlc = docs_dlc
self._encoding = encoding
self._path_globs = path_globs
self._content_tags = content_tags
self._parser = {
'BS4': self._parser_bs,
'text': self._parser_text,
'tut': self._parser_tut,
}[parser]
self._doc = {
'BS4': TrecDoc,
'text': GenericDoc,
'tut': TitleUrlTextDoc,
}[parser]
self._docs_namespace = namespace
self._docs_lang = lang
self._expected_file_count = expected_file_count
self._docstore_size_hint = docstore_size_hint
self._count_hint = count_hint
if expected_file_count is not None:
assert self._path_globs is not None, "expected_file_count only supported with path_globs"
def docs_path(self, force=True):
return self._docs_dlc.path(force)
@ir_datasets.util.use_docstore
def docs_iter(self):
if Path(self._docs_dlc.path()).is_dir():
if self._path_globs:
file_count = 0
for glob in sorted(self._path_globs):
for path in sorted(Path(self._docs_dlc.path()).glob(glob)):
file_count += 1
yield from self._docs_iter(path)
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
yield from self._docs_iter(self._docs_dlc.path())
else:
if self._path_globs:
file_count = 0
# tarfile, find globs, open in streaming mode (r|)
with self._docs_dlc.stream() as stream:
with tarfile.open(fileobj=stream, mode='r|gz') as tarf:
for block in tarf:
if any(fnmatch(block.name, g) for g in self._path_globs):
file = tarf.extractfile(block)
if block.name.endswith('.gz'):
file = gzip.GzipFile(fileobj=file)
yield from self._parser(file)
file_count += 1
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
with self._docs_dlc.stream() as f:
yield from self._parser(f)
def _docs_iter(self, path):
if Path(path).is_file():
if str(path).endswith('.gz'):
with gzip.open(path, 'rb') as f:
yield from self._parser(f)
else:
with path.open('rb') as f:
yield from self._parser(f)
elif Path(path).is_dir():
for child in path.iterdir():
yield from self._docs_iter(child)
def _parser_bs(self, stream):
BeautifulSoup = ir_datasets.lazy_libs.bs4().BeautifulSoup
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_markup = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
soup = BeautifulSoup(f'<OUTER>\n{doc_markup}\n</OUTER>', 'lxml')
text = soup.get_text()
yield TrecDoc(doc_id, text, doc_markup)
doc_id, doc_markup = None, ''
else:
if in_tag:
doc_markup += line
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag -= 1
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag += 1
if in_tag == 1:
doc_markup += line
def _parser_text(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_text = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
yield GenericDoc(doc_id, doc_text)
doc_id, doc_text = None, ''
else:
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag = True
def _parser_tut(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
if line.startswith('<TITLE>'):
doc_title = line.replace('<TITLE>', '').replace('</TITLE>\n', '').strip()
if line.startswith('<URL>'):
doc_url = line.replace('<URL>', '').replace('</URL>\n', '').strip()
elif line == '</DOC>\n':
yield TitleUrlTextDoc(doc_id, doc_title, doc_url, doc_text)
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
else:
if line.startswith('</TEXT>'):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<TEXT>'):
in_tag = True
def docs_cls(self):
return self._doc
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{self.docs_path(force=False)}.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
size_hint=self._docstore_size_hint,
count_hint=self._count_hint,
)
def docs_count(self):
if self.docs_store().built():
return self.docs_store().count()
def docs_namespace(self):
return self._docs_namespace
def docs_lang(self):
return self._docs_lang
DEFAULT_QTYPE_MAP = {
'<num> *(Number:)?': 'query_id',
'<title> *(Topic:)?': 'title',
'<desc> *(Description:)?': 'description',
'<narr> *(Narrative:)?': 'narrative'
}
class TrecQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, namespace=None, lang=None, remove_tags=('</title>',)):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or DEFAULT_QTYPE_MAP
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
self._remove_tags = remove_tags
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
fields, reading = {}, None
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
if line.startswith('</top>'):
assert len(fields) == len(self._qtype._fields), fields
for tag in self._remove_tags:
fields = {k: v.replace(tag, '') for k, v in fields.items()}
yield self._qtype(*(fields[f].strip() for f in self._qtype._fields))
fields, reading = {}, None
match_any = False
for tag, target in self._qtype_map.items():
match = re.match(tag, line)
if match:
fields[target] = line[match.end():]
reading = target
match_any = True
break
if not match_any and reading and not line.startswith('<'):
fields[reading] += line
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecXmlQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, subtopics_key='subtopics', namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or {f: f for f in qtype._fields}
self._encoding = encoding
self._subtopics_key = subtopics_key
self._queries_namespace = namespace
self._queries_lang = lang
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for topic_el in ET.fromstring(f.read()):
item = [None for _ in self._qtype._fields]
if 'number' in topic_el.attrib:
item[self._qtype._fields.index('query_id')] = topic_el.attrib['number']
subtopics = []
for attr in topic_el.attrib:
if attr in self._qtype_map:
text = topic_el.attrib[attr]
field = self._qtype_map[attr]
item[self._qtype._fields.index(field)] = text
if topic_el.tag in self._qtype_map:
text = ''.join(topic_el.itertext())
field = self._qtype_map[topic_el.tag]
item[self._qtype._fields.index(field)] = text
for field_el in topic_el:
if field_el.tag in self._qtype_map:
text = ''.join(field_el.itertext())
field = self._qtype_map[field_el.tag]
item[self._qtype._fields.index(field)] = text
if field_el.tag == 'subtopic':
text = ''.join(field_el.itertext())
subtopics.append(TrecSubtopic(field_el.attrib['number'], text, field_el.attrib['type']))
if self._subtopics_key in self._qtype._fields:
item[self._qtype._fields.index('subtopics')] = tuple(subtopics)
qid_field = self._qtype._fields.index('query_id')
item[qid_field] = item[qid_field].strip() # remove whitespace from query_ids
yield self._qtype(*item)
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecColonQueries(BaseQueries):
def __init__(self, queries_dlc, encoding=None, namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
query_id, text = line.split(':', 1)
text = text.rstrip('\n')
yield GenericQuery(query_id, text)
def queries_path(self):
return self._queries_dlc.path()
def queries_cls(self):
return GenericQuery
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecQrels(BaseQrels):
def __init__(self, qrels_dlc, qrels_defs):
self._qrels_dlc = qrels_dlc
self._qrels_defs = qrels_defs
def qrels_path(self):
return self._qrels_dlc.path()
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 4:
raise RuntimeError(f'expected 4 columns, got {len(cols)}')
qid, it, did, score = cols
yield TrecQrel(qid, did, int(score), it)
def qrels_cls(self):
return TrecQrel
def qrels_defs(self):
return self._qrels_defs
class TrecPrels(TrecQrels):
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 5:
raise RuntimeError(f'expected 5 columns, got {len(cols)}')
qid, did, rel, method, iprob = cols
yield TrecPrel(qid, did, int(rel), int(method), float(iprob))
def qrels_cls(self):
return TrecPrel
class TrecScoredDocs(BaseScoredDocs):
def __init__(self, scoreddocs_dlc):
self._scoreddocs_dlc = scoreddocs_dlc
def scoreddocs_path(self):
return self._scoreddocs_dlc.path()
def scoreddocs_iter(self):
with self._scoreddocs_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
cols = line.rstrip().split()
if len(cols) == 6:
qid, _, did, _, score, _ = cols
elif len(cols) == 2:
qid, did, score = *cols, '0'
yield GenericScoredDoc(qid, did, float(score))
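A short usage sketch for TrecQrels (not from the original module): a stand-in object supplies the stream()/path() interface that the *_dlc parameters are expected to expose.
import contextlib

class LocalFileDlc:
    """Hypothetical stand-in for an ir_datasets download object."""
    def __init__(self, path):
        self._path = path
    def path(self, force=True):
        return self._path
    @contextlib.contextmanager
    def stream(self):
        with open(self._path, 'rb') as f:
            yield f

qrels = TrecQrels(LocalFileDlc('example.qrels'), qrels_defs={0: 'not relevant', 1: 'relevant'})
for qrel in qrels.qrels_iter():
    print(qrel.query_id, qrel.doc_id, qrel.relevance)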
| 38.429612
| 284
| 0.55978
| 1,860
| 15,833
| 4.52043
| 0.130108
| 0.02676
| 0.028306
| 0.022479
| 0.489653
| 0.442793
| 0.410442
| 0.393911
| 0.373335
| 0.354781
| 0
| 0.002943
| 0.334681
| 15,833
| 411
| 285
| 38.523114
| 0.795234
| 0.0108
| 0
| 0.439437
| 0
| 0.005634
| 0.077095
| 0.010731
| 0
| 0
| 0
| 0
| 0.005634
| 1
| 0.112676
| false
| 0
| 0.033803
| 0.061972
| 0.312676
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9c552700ad0a75cac33278ee8dc5a5139c2432
| 844
|
py
|
Python
|
textpand/download.py
|
caufieldjh/textpand-for-kgs
|
42853c53c5a4cc06fbd745c147d02fe7916690fa
|
[
"BSD-3-Clause"
] | 3
|
2021-12-10T21:13:47.000Z
|
2021-12-10T23:36:18.000Z
|
textpand/download.py
|
caufieldjh/textpand-for-kgs
|
42853c53c5a4cc06fbd745c147d02fe7916690fa
|
[
"BSD-3-Clause"
] | 1
|
2022-01-06T20:59:07.000Z
|
2022-01-06T20:59:07.000Z
|
textpand/download.py
|
caufieldjh/textpand-for-kgs
|
42853c53c5a4cc06fbd745c147d02fe7916690fa
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .utils import download_from_yaml
def download(output_dir: str, snippet_only: bool, ignore_cache: bool = False) -> None:
"""Downloads data files from list of URLs (default: download.yaml) into data directory (default: data/).
Args:
output_dir: A string pointing to the location to download data to.
snippet_only: Downloads only the first 5 kB of the source, for testing and file checks.
ignore_cache: Ignore cache and download files even if they exist [false]
Returns:
None.
"""
download_from_yaml(yaml_file="download.yaml",
output_dir=output_dir,
snippet_only=snippet_only,
ignore_cache=ignore_cache,
verbose=True)
return None
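A hypothetical invocation of the wrapper above (the output directory name is an assumption):
# Fetch only 5 kB snippets into ./data, reusing any previously cached files.
download(output_dir="data", snippet_only=True, ignore_cache=False)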
| 31.259259
| 108
| 0.625592
| 107
| 844
| 4.775701
| 0.523364
| 0.107632
| 0.062622
| 0.086106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003367
| 0.296209
| 844
| 26
| 109
| 32.461538
| 0.856902
| 0.491706
| 0
| 0
| 0
| 0
| 0.033079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9cb003c79f63e5985173912dffc928314248d4
| 6,770
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_ontap_autosupport_invoke
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
module: na_ontap_autosupport_invoke
author: NetApp Ansible Team (@carchi8py) <[email protected]>
short_description: NetApp ONTAP send AutoSupport message
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: '20.4.0'
description:
- Send an AutoSupport message from a node
options:
name:
description:
- The name of the node to send the message to.
- Not specifying this option invokes AutoSupport on all nodes in the cluster.
type: str
autosupport_message:
description:
- Text sent in the subject line of the AutoSupport message.
type: str
aliases:
- message
version_added: 20.8.0
type:
description:
- Type of AutoSupport Collection to Issue.
choices: ['test', 'performance', 'all']
default: 'all'
type: str
uri:
description:
- send the AutoSupport message to the destination you specify instead of the configured destination.
type: str
'''
EXAMPLES = '''
- name: Send message
na_ontap_autosupport_invoke:
name: node1
message: invoked test autosupport rest
uri: http://1.2.3.4/delivery_uri
type: test
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPasupInvoke(object):
''' send ASUP message '''
def __init__(self):
self.use_rest = False
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=False, type='str'),
autosupport_message=dict(required=False, type='str', aliases=["message"]),
type=dict(required=False, choices=[
'test', 'performance', 'all'], default='all'),
uri=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# REST API should be used for ONTAP 9.6 or higher.
self.rest_api = OntapRestAPI(self.module)
if self.rest_api.is_rest():
self.use_rest = True
else:
if not HAS_NETAPP_LIB:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_nodes(self):
nodes = list()
node_obj = netapp_utils.zapi.NaElement('system-node-get-iter')
desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
node_details_info = netapp_utils.zapi.NaElement('node-details-info')
node_details_info.add_new_child('node', '')
desired_attributes.add_child_elem(node_details_info)
node_obj.add_child_elem(desired_attributes)
try:
result = self.server.invoke_successfully(node_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
node_info = result.get_child_by_name('attributes-list')
if node_info is not None:
nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()]
return nodes
def send_zapi_message(self, params, node_name):
params['node-name'] = node_name
send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params)
try:
self.server.invoke_successfully(send_message, enable_tunneling=False)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, to_native(error)),
exception=traceback.format_exc())
def send_message(self):
params = dict()
if self.parameters.get('autosupport_message'):
params['message'] = self.parameters['autosupport_message']
if self.parameters.get('type'):
params['type'] = self.parameters['type']
if self.parameters.get('uri'):
params['uri'] = self.parameters['uri']
if self.use_rest:
if self.parameters.get('name'):
params['node.name'] = self.parameters['name']
node_name = params['node.name']
else:
node_name = '*'
api = 'support/autosupport/messages'
dummy, error = self.rest_api.post(api, params)
if error is not None:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, error))
else:
if self.parameters.get('name'):
node_names = [self.parameters['name']]
else:
# simulate REST behavior by sending to all nodes in the cluster
node_names = self.get_nodes()
for name in node_names:
self.send_zapi_message(params, name)
def ems_log_event(self):
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
return netapp_utils.ems_log_event("na_ontap_autosupport_invoke", cserver)
def apply(self):
if not self.use_rest:
self.ems_log_event()
if self.module.check_mode:
pass
else:
self.send_message()
self.module.exit_json(changed=True)
def main():
message = NetAppONTAPasupInvoke()
message.apply()
if __name__ == '__main__':
main()
| 34.365482
| 109
| 0.644018
| 816
| 6,770
| 5.121324
| 0.264706
| 0.034219
| 0.021536
| 0.022733
| 0.24001
| 0.152668
| 0.135918
| 0.116774
| 0.104092
| 0.056234
| 0
| 0.005343
| 0.253619
| 6,770
| 196
| 110
| 34.540816
| 0.82169
| 0.042393
| 0
| 0.167742
| 0
| 0
| 0.275793
| 0.030317
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045161
| false
| 0.012903
| 0.045161
| 0
| 0.109677
| 0.006452
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9cd0c545ed5aa451bbc0bc26a2e800d471ecd0
| 304
|
py
|
Python
|
tests/api/serializer/test_user.py
|
armandomeeuwenoord/freight
|
31ae2fa9252ab0b25385abd04742475e6671e3b1
|
[
"Apache-2.0"
] | 562
|
2015-02-20T08:25:24.000Z
|
2021-11-12T19:58:44.000Z
|
tests/api/serializer/test_user.py
|
armandomeeuwenoord/freight
|
31ae2fa9252ab0b25385abd04742475e6671e3b1
|
[
"Apache-2.0"
] | 129
|
2015-02-20T07:41:14.000Z
|
2022-02-17T21:14:40.000Z
|
tests/api/serializer/test_user.py
|
armandomeeuwenoord/freight
|
31ae2fa9252ab0b25385abd04742475e6671e3b1
|
[
"Apache-2.0"
] | 54
|
2015-02-28T01:12:23.000Z
|
2021-03-02T11:14:52.000Z
|
from freight.api.serializer import serialize
from freight.testutils import TestCase
class UserSerializerTest(TestCase):
def test_simple(self):
user = self.create_user()
result = serialize(user)
assert result["id"] == str(user.id)
assert result["name"] == user.name
| 25.333333
| 44
| 0.680921
| 36
| 304
| 5.694444
| 0.583333
| 0.107317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213816
| 304
| 11
| 45
| 27.636364
| 0.857741
| 0
| 0
| 0
| 0
| 0
| 0.019737
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9cdc6c74a18d65dd44c9480dd5e3953a78dd18
| 1,639
|
py
|
Python
|
binning/pozo_5m_class_dem.py
|
UP-RS-ESP/GEW-DAP04-WS201819
|
18341620d9168e1eec476af1d8f568cf0017bf56
|
[
"MIT"
] | 2
|
2020-10-12T11:33:00.000Z
|
2021-12-20T06:33:54.000Z
|
binning/pozo_5m_class_dem.py
|
UP-RS-ESP/GEW-DAP04-WS201819
|
18341620d9168e1eec476af1d8f568cf0017bf56
|
[
"MIT"
] | null | null | null |
binning/pozo_5m_class_dem.py
|
UP-RS-ESP/GEW-DAP04-WS201819
|
18341620d9168e1eec476af1d8f568cf0017bf56
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
from matplotlib import pyplot as pl
from rw import WriteGTiff
fn = '../pozo-steep-vegetated-pcl.npy'
pts = np.load(fn)
x, y, z, c = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 5]
ix = (0.2 * (x - x.min())).astype('int')
iy = (0.2 * (y - y.min())).astype('int')
shape = (100, 100)
xb = np.arange(shape[1]+1)
yb = np.arange(shape[0]+1)
fg, ax = pl.subplots(ncols = 2, nrows = 2,
figsize = (10.24, 10.24),
sharex = True, sharey = True)
uc = (2, 5)
for j in range(len(uc)):
print('Class %i' % uc[j])
b = c == uc[j]
cx, cy, cz = ix[b], iy[b], z[b]
mean = np.zeros(shape)
stdr = np.zeros(shape)
for i in range(shape[0]):
print('% 3d%%' % i)
for k in range(shape[1]):
b = (cy == i) * (cx == k)
mean[i, k] = cz[b].mean()
stdr[i, k] = cz[b].std()
fname = 'pozo_5m_dem_mean_cl%i.tif' % uc[j]
WriteGTiff(fname, mean, x.min(), y.min()+500, step = 5)
np.save('pozo_5m_dem_mean_cl%i.npy' % uc[j], mean)
np.save('pozo_5m_dem_stdr_cl%i.npy' % uc[j], stdr)
ax[0, j].set_title('Class %i' % uc[j])
im = ax[0, j].pcolormesh(xb, yb,
np.ma.masked_invalid(mean),
cmap = pl.cm.viridis_r)
cb = fg.colorbar(im, ax = ax[0, j])
cb.set_label('Mean elevation [m]')
im = ax[1, j].pcolormesh(xb, yb,
np.ma.masked_invalid(stdr),
cmap = pl.cm.magma_r)
cb = fg.colorbar(im, ax = ax[1, j])
cb.set_label('Elevation STD')
ax[0, j].set_aspect('equal')
ax[1, j].set_aspect('equal')
pl.savefig('%s.png' % sys.argv[0][:-3])
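A sketch of an alternative to the nested per-cell loops above (not part of the original script; it assumes SciPy is available as an extra dependency):
from scipy.stats import binned_statistic_2d

# Same per-cell mean as the explicit loops, computed in one call; cx, cy, cz are
# the per-class index and elevation arrays built earlier in the loop body.
mean_fast, _, _, _ = binned_statistic_2d(
    cy, cx, cz, statistic='mean', bins=shape,
    range=[[0, shape[0]], [0, shape[1]]])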
| 30.351852
| 59
| 0.52349
| 287
| 1,639
| 2.916376
| 0.344948
| 0.021505
| 0.019116
| 0.021505
| 0.203106
| 0.160096
| 0.121864
| 0.076464
| 0
| 0
| 0
| 0.040765
| 0.266626
| 1,639
| 53
| 60
| 30.924528
| 0.655574
| 0
| 0
| 0
| 0
| 0
| 0.110433
| 0.064674
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9cfc593e93acc1f1c0f3afda04be08e714940c
| 2,228
|
py
|
Python
|
comtypes/_meta.py
|
phuslu/pyMSAA
|
611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0
|
[
"MIT"
] | 23
|
2015-05-28T15:31:35.000Z
|
2022-02-16T07:51:34.000Z
|
comtypes/_meta.py
|
kar98kar/pyMSAA
|
611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0
|
[
"MIT"
] | 3
|
2020-05-19T03:00:52.000Z
|
2020-11-03T09:22:51.000Z
|
comtypes/_meta.py
|
kar98kar/pyMSAA
|
611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0
|
[
"MIT"
] | 13
|
2016-08-26T23:00:40.000Z
|
2022-03-03T09:58:36.000Z
|
# comtypes._meta helper module
from ctypes import POINTER, c_void_p, cast
import comtypes
################################################################
# metaclass for CoClass (in comtypes/__init__.py)
def _wrap_coclass(self):
# We are an IUnknown pointer, represented as a c_void_p instance,
# but we really want this interface:
itf = self._com_interfaces_[0]
punk = cast(self, POINTER(itf))
result = punk.QueryInterface(itf)
result.__dict__["__clsid"] = str(self._reg_clsid_)
return result
def _coclass_from_param(cls, obj):
if isinstance(obj, (cls._com_interfaces_[0], cls)):
return obj
raise TypeError(obj)
#
# The mro() of a POINTER(App) type, where class App is a subclass of CoClass:
#
# POINTER(App)
# App
# CoClass
# c_void_p
# _SimpleCData
# _CData
# object
class _coclass_meta(type):
# metaclass for CoClass
#
# When a CoClass subclass is created, create a POINTER(...) type
# for that class, with bases <coclass> and c_void_p. Also, the
# POINTER(...) type gets a __ctypes_from_outparam__ method which
# will QueryInterface for the default interface: the first one on
# the coclass' _com_interfaces_ list.
def __new__(cls, name, bases, namespace):
klass = type.__new__(cls, name, bases, namespace)
if bases == (object,):
return klass
# XXX We should insist that a _reg_clsid_ is present.
if "_reg_clsid_" in namespace:
clsid = namespace["_reg_clsid_"]
comtypes.com_coclass_registry[str(clsid)] = klass
PTR = _coclass_pointer_meta("POINTER(%s)" % klass.__name__,
(klass, c_void_p),
{"__ctypes_from_outparam__": _wrap_coclass,
"from_param": classmethod(_coclass_from_param),
})
from ctypes import _pointer_type_cache
_pointer_type_cache[klass] = PTR
return klass
# will not work if we change the order of the two base classes!
class _coclass_pointer_meta(type(c_void_p), _coclass_meta):
pass
| 35.935484
| 85
| 0.601436
| 267
| 2,228
| 4.64794
| 0.389513
| 0.024174
| 0.029009
| 0.037067
| 0.038678
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001266
| 0.290844
| 2,228
| 61
| 86
| 36.52459
| 0.784177
| 0.342908
| 0
| 0.066667
| 0
| 0
| 0.056274
| 0.018251
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.033333
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9da9d2fe8534cba2998ec7d3c2190abe55abec
| 5,190
|
py
|
Python
|
deep_sdf/workspace.py
|
huajian1069/non-convex_optimisation
|
cf4cd5070524c3f7e6b814fe9b85a15a06e7b8db
|
[
"MIT"
] | 2
|
2020-10-12T19:22:50.000Z
|
2021-08-21T21:48:27.000Z
|
deep_sdf/workspace.py
|
huajian1069/non-convex_optimisation
|
cf4cd5070524c3f7e6b814fe9b85a15a06e7b8db
|
[
"MIT"
] | 13
|
2020-04-17T09:07:06.000Z
|
2020-07-25T19:43:44.000Z
|
deep_sdf/workspace.py
|
huajian1069/non-convex-optimisation
|
cf4cd5070524c3f7e6b814fe9b85a15a06e7b8db
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import os
import torch
model_params_subdir = "ModelParameters"
optimizer_params_subdir = "OptimizerParameters"
latent_codes_subdir = "LatentCodes"
logs_filename = "Logs.pth"
reconstructions_subdir = "Reconstructions"
reconstruction_meshes_subdir = "Meshes"
reconstruction_codes_subdir = "Codes"
optimizations_subdir = "Optimizations"
optimizations_meshes_subdir = "Meshes"
optimizations_codes_subdir = "Codes"
specifications_filename = "specs.json"
data_source_map_filename = ".datasources.json"
evaluation_subdir = "Evaluation"
sdf_samples_subdir = "SdfSamples"
renders_subdir = "Renders"
surface_samples_subdir = "SurfaceSamples"
normalization_param_subdir = "NormalizationParameters"
training_meshes_subdir = "TrainingMeshes"
def load_experiment_specifications(experiment_directory):
filename = os.path.join(experiment_directory, specifications_filename)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include specifications file "
+ '"specs.json"'.format(experiment_directory)
)
return json.load(open(filename))
def load_model_parameters(experiment_directory, checkpoint, decoder):
filename = os.path.join(
experiment_directory, model_params_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception('model state dict "{}" does not exist'.format(filename))
data = torch.load(filename)
decoder.load_state_dict(data["model_state_dict"])
return data["epoch"]
def build_decoder(experiment_directory, experiment_specs):
arch = __import__(
"networks." + experiment_specs["NetworkArch"], fromlist=["Decoder"]
)
latent_size = experiment_specs["CodeLength"]
decoder = arch.Decoder(latent_size, **experiment_specs["NetworkSpecs"]).cuda()
return decoder
def load_decoder(
experiment_directory, experiment_specs, checkpoint, data_parallel=True
):
decoder = build_decoder(experiment_directory, experiment_specs)
if data_parallel:
decoder = torch.nn.DataParallel(decoder)
epoch = load_model_parameters(experiment_directory, checkpoint, decoder)
return (decoder, epoch)
def load_latent_vectors(experiment_directory, checkpoint):
filename = os.path.join(
experiment_directory, latent_codes_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include a latent code file"
+ " for checkpoint '{}'".format(experiment_directory, checkpoint)
)
data = torch.load(filename)
if isinstance(data["latent_codes"], torch.Tensor):
num_vecs = data["latent_codes"].size()[0]
lat_vecs = []
for i in range(num_vecs):
lat_vecs.append(data["latent_codes"][i].cuda())
return lat_vecs
else:
num_embeddings, embedding_dim = data["latent_codes"]["weight"].shape
lat_vecs = torch.nn.Embedding(num_embeddings, embedding_dim)
lat_vecs.load_state_dict(data["latent_codes"])
return lat_vecs.weight.data.detach()
def get_data_source_map_filename(data_dir):
return os.path.join(data_dir, data_source_map_filename)
def get_reconstructed_mesh_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_meshes_subdir,
dataset,
class_name,
instance_name + ".ply",
)
def get_reconstructed_code_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_codes_subdir,
dataset,
class_name,
instance_name + ".pth",
)
def get_evaluation_dir(experiment_dir, checkpoint, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, evaluation_subdir, checkpoint)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_model_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, model_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_optimizer_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, optimizer_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_latent_codes_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, latent_codes_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_normalization_params_filename(
data_dir, dataset_name, class_name, instance_name
):
return os.path.join(
data_dir,
normalization_param_subdir,
dataset_name,
class_name,
instance_name + ".npz",
)
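A hedged usage sketch (not part of the original module): restoring a trained decoder. The experiment directory and checkpoint name below are assumptions, and build_decoder() expects a CUDA device to be present.
specs = load_experiment_specifications("experiments/example")   # hypothetical experiment directory
decoder, epoch = load_decoder("experiments/example", specs, "latest")
print("decoder restored from epoch", epoch)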
| 25.566502
| 82
| 0.7158
| 608
| 5,190
| 5.807566
| 0.205592
| 0.030586
| 0.031153
| 0.050977
| 0.464741
| 0.435004
| 0.337581
| 0.306429
| 0.29595
| 0.29595
| 0
| 0.001431
| 0.1921
| 5,190
| 202
| 83
| 25.693069
| 0.840687
| 0.014451
| 0
| 0.315385
| 0
| 0
| 0.110307
| 0.004498
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.030769
| 0.030769
| 0.238462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9e779e59feb114fa5c597307d0f0ba536c3a82
| 1,571
|
py
|
Python
|
EmoPy/EmoPy/examples/convolutional_dropout_model.py
|
Rahmatullina/FinalYearProject
|
326f521b9f600dbbc7ace2223bd5aafc79b2267c
|
[
"Apache-2.0"
] | null | null | null |
EmoPy/EmoPy/examples/convolutional_dropout_model.py
|
Rahmatullina/FinalYearProject
|
326f521b9f600dbbc7ace2223bd5aafc79b2267c
|
[
"Apache-2.0"
] | 9
|
2020-09-26T01:09:35.000Z
|
2022-02-10T01:32:30.000Z
|
EmoPy/EmoPy/examples/convolutional_dropout_model.py
|
Rahmatullina/FinalYearProject
|
326f521b9f600dbbc7ace2223bd5aafc79b2267c
|
[
"Apache-2.0"
] | null | null | null |
from EmoPy.src.fermodel import FERModel
from EmoPy.src.directory_data_loader import DirectoryDataLoader
from EmoPy.src.csv_data_loader import CSVDataLoader
from EmoPy.src.data_generator import DataGenerator
from EmoPy.src.neuralnets import ConvolutionalNNDropout
from sklearn.model_selection import train_test_split
import numpy as np
from pkg_resources import resource_filename,resource_exists
validation_split = 0.15
target_dimensions = (48, 48)
channels = 1
verbose = True
print('--------------- Convolutional Dropout Model -------------------')
print('Loading data...')
directory_path = resource_filename('EmoPy.examples','image_data/sample_image_directory')
data_loader = DirectoryDataLoader(datapath=directory_path, validation_split=validation_split)
dataset = data_loader.load_data()
if verbose:
dataset.print_data_details()
print('Preparing training/testing data...')
train_images, train_labels = dataset.get_training_data()
train_gen = DataGenerator().fit(train_images, train_labels)
test_images, test_labels = dataset.get_test_data()
test_gen = DataGenerator().fit(test_images, test_labels)
print('Training net...')
model = ConvolutionalNNDropout(target_dimensions, channels, dataset.get_emotion_index_map(), verbose=True)
model.fit_generator(train_gen.generate(target_dimensions, batch_size=5),
test_gen.generate(target_dimensions, batch_size=5),
epochs=15)
# Save model configuration
# model.export_model('output/conv2d_model.json','output/conv2d_weights.h5',"output/conv2d_emotion_map.json", emotion_map)
| 38.317073
| 121
| 0.789306
| 200
| 1,571
| 5.915
| 0.385
| 0.038039
| 0.050719
| 0.037194
| 0.062553
| 0.062553
| 0.062553
| 0
| 0
| 0
| 0
| 0.011315
| 0.099936
| 1,571
| 40
| 122
| 39.275
| 0.825318
| 0.091661
| 0
| 0
| 0
| 0
| 0.122191
| 0.023174
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.275862
| 0
| 0.275862
| 0.172414
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9f71d63576d36e576c5ed1a561ba09b6a33e88
| 45,622
|
py
|
Python
|
deepstream_ignition_usb_yolo.py
|
valdivj/Deepstream-IGN-Maker-YOLO
|
f38ece731e9797a525da932c3da2de77e48f45af
|
[
"Unlicense"
] | 18
|
2021-02-09T11:07:57.000Z
|
2022-03-16T12:35:34.000Z
|
deepstream_ignition_usb_yolo.py
|
valdivj/Deepstream-IGN-Maker-YOLO
|
f38ece731e9797a525da932c3da2de77e48f45af
|
[
"Unlicense"
] | null | null | null |
deepstream_ignition_usb_yolo.py
|
valdivj/Deepstream-IGN-Maker-YOLO
|
f38ece731e9797a525da932c3da2de77e48f45af
|
[
"Unlicense"
] | 3
|
2021-02-11T00:23:56.000Z
|
2021-11-16T02:15:37.000Z
|
#!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import sys
sys.path.append('../')
sys.path.insert(0, "../../../client_libraries/python/")
import paho.mqtt.client as mqtt
import sparkplug_b as sparkplug
import time
import time, threading
import random
import string
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from sparkplug_b import *
import pyds
# Application Variables
serverUrl = "localhost"
myGroupId = "Sparkplug B Devices"
myNodeName = "NVIDIA"
myDeviceName = "XavierNX"
publishPeriod = 5000
myUsername = "admin"
myPassword = "changeme"
client = mqtt.Client(serverUrl, 1883, 60)
WAIT_SECONDS = 1
frame_numberx = 0
num_rectsx = 0
counter1 = 0
counter2 = 0
Object1 = 0
Object2 = 0
Object3 = 0
Object4 = 0
Object5 = 0
Object6 = 0
Object7 = 0
Object8 = 0
Object9 = 0
Object10 = 0
newValue1 = 0
newValue2 = 0
newValue3 = 0
newValue4 = 0
newValue5 = 0
newValue6 = 0
newValue7 = 0
newValue8 = 0
newValue9 = 0
newValue10 = 0
class AliasMap:
Next_Server = 0
Rebirth = 1
Reboot = 2
Device_frame_numberx = 3
Device_num_rectsx = 4
Device_Metric0 = 5
Device_Metric1 = 6
Device_Metric2 = 7
Device_Metric3 = 8
Device_Metric4 = 9
Device_counter1 = 10
Device_counter2 = 11
Device_Input1 = 12
Device_Input2 = 13
Device_Input3 = 14
Device_Input4 = 15
Device_Input5 = 16
Device_Input6 = 17
Device_Input7 = 18
Device_Input8 = 19
Device_Input9 = 20
Device_Input10 = 21
Device_Output1 = 22
Device_Output2 = 23
Device_Output3 = 24
Device_Output4 = 25
Device_Output5 = 26
Device_Output6 = 27
Device_Output7 = 28
Device_Output8 = 29
Device_Output9 = 30
Device_Output10 = 31
MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_TOOTHBRUSH = 79
PGIE_CLASS_ID_HAIR_DRYER = 78
PGIE_CLASS_ID_TEDDY_BEAR = 77
PGIE_CLASS_ID_SCISSORS = 76
PGIE_CLASS_ID_VASE = 75
PGIE_CLASS_ID_CLOCK = 74
PGIE_CLASS_ID_BOOK = 73
PGIE_CLASS_ID_REFRIGERATOR = 72
PGIE_CLASS_ID_SINK = 71
PGIE_CLASS_ID_TOASTER = 70
PGIE_CLASS_ID_OVEN = 69
PGIE_CLASS_ID_MICROWAVE = 68
PGIE_CLASS_ID_CELL_PHONE = 67
PGIE_CLASS_ID_KEYBOARD = 66
PGIE_CLASS_ID_REMOTE = 65
PGIE_CLASS_ID_MOUSE = 64
PGIE_CLASS_ID_LAPTOP = 63
PGIE_CLASS_ID_TVMONITOR = 62
PGIE_CLASS_ID_TOILET = 61
PGIE_CLASS_ID_DININGTABLE = 60
PGIE_CLASS_ID_BED = 59
PGIE_CLASS_ID_POTTEDPLANT = 58
PGIE_CLASS_ID_SOFA = 57
PGIE_CLASS_ID_CHAIR = 56
PGIE_CLASS_ID_CAKE = 55
PGIE_CLASS_ID_DONUT = 54
PGIE_CLASS_ID_PIZZA = 53
PGIE_CLASS_ID_HOT_DOG = 52
PGIE_CLASS_ID_CARROT = 51
PGIE_CLASS_ID_BROCCOLI = 50
PGIE_CLASS_ID_ORANGE = 49
PGIE_CLASS_ID_SANDWICH = 48
PGIE_CLASS_ID_APPLE = 47
PGIE_CLASS_ID_BANANA = 46
PGIE_CLASS_ID_BOWL = 45
PGIE_CLASS_ID_SPOON = 44
PGIE_CLASS_ID_KNIFE = 43
PGIE_CLASS_ID_FORK = 42
PGIE_CLASS_ID_CUP = 41
PGIE_CLASS_ID_WINE_GLASS = 40
PGIE_CLASS_ID_BOTTLE = 39
PGIE_CLASS_ID_TENNIS_RACKET = 38
PGIE_CLASS_ID_SURFBOARD = 37
PGIE_CLASS_ID_SKATEBOARD = 36
PGIE_CLASS_ID_BASEBALL_GLOVE = 35
PGIE_CLASS_ID_BASEBALL_BAT = 34
PGIE_CLASS_ID_KITE = 33
PGIE_CLASS_ID_SPORTS_BALL = 32
PGIE_CLASS_ID_SNOWBOARD = 31
PGIE_CLASS_ID_SKIS = 30
PGIE_CLASS_ID_FRISBEE = 29
PGIE_CLASS_ID_SUITCASE = 28
PGIE_CLASS_ID_TIE = 27
PGIE_CLASS_ID_HANDBAG = 26
PGIE_CLASS_ID_UMBRELLA = 25
PGIE_CLASS_ID_BACKPACK = 24
PGIE_CLASS_ID_GIRAFFE = 23
PGIE_CLASS_ID_ZEBRA = 22
PGIE_CLASS_ID_BEAR = 21
PGIE_CLASS_ID_ELEPHANT = 20
PGIE_CLASS_ID_COW = 19
PGIE_CLASS_ID_SHEEP = 18
PGIE_CLASS_ID_HORSE = 17
PGIE_CLASS_ID_DOG = 16
PGIE_CLASS_ID_CAT = 15
PGIE_CLASS_ID_BIRD = 14
PGIE_CLASS_ID_BENCH = 13
PGIE_CLASS_ID_PARKING_METER = 12
PGIE_CLASS_ID_STOP_SIGN = 11
PGIE_CLASS_ID_FIRE_HYDRANT = 10
PGIE_CLASS_ID_TRAFFIC_LIGHT = 9
PGIE_CLASS_ID_BOAT = 8
PGIE_CLASS_ID_TRUCK = 7
PGIE_CLASS_ID_TRAIN = 6
PGIE_CLASS_ID_BUS = 5
PGIE_CLASS_ID_AEROPLANE = 4
PGIE_CLASS_ID_MOTORBIKE = 3
PGIE_CLASS_ID_VEHICLE = 2
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 0
pgie_classes_str= ["Toothbrush", "Hair dryer", "Teddy bear","Scissors","Vase", "Clock", "Book","Refrigerator", "Sink", "Toaster","Oven","Microwave", "Cell phone", "Keyboard","Remote", "Mouse", "Laptop","Tvmonitor","Toilet", "Diningtable", "Bed","Pottedplant", "Sofa", "Chair","Cake","Donut", "Pizza", "Hot dog","Carrot", "Broccli", "Orange","Sandwich","Apple", "Banana", "Bowl","Spoon", "Knife", "Fork","Cup","Wine Glass", "Bottle", "Tennis racket","Surfboard", "Skateboard", "Baseball glove","Baseball bat","Kite", "Sports ball", "Snowboard","Skis", "Frisbee", "Suitcase","Tie","Handbag", "Umbrella", "Backpack","Giraffe", "Zebra", "Bear","Elephant","Cow", "Sheep", "Horse","Dog", "Cat", "Bird","Bench","Parking meter", "Stop sign", "Fire hydrant","Traffic light", "Boat", "Truck","Train","Bus", "Areoplane", "Motorbike","Car", "Bicycle", "Person"]
######################################################################
# The callback for when the client receives a CONNACK response from the server.
######################################################################
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected with result code "+str(rc))
else:
print("Failed to connect with result code "+str(rc))
sys.exit()
global myGroupId
global myNodeName
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("spBv1.0/" + myGroupId + "/NCMD/" + myNodeName + "/#")
client.subscribe("spBv1.0/" + myGroupId + "/DCMD/" + myNodeName + "/#")
######################################################################
######################################################################
# The callback for when a PUBLISH message is received from the server.
######################################################################
def on_message(client, userdata, msg):
print("Message arrived: " + msg.topic)
tokens = msg.topic.split("/")
global newValue1
global newValue2
global newValue3
global newValue4
global newValue5
global newValue6
global newValue7
global newValue8
global newValue9
global newValue10
if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and (tokens[2] == "NCMD" or tokens[2] == "DCMD") and tokens[3] == myNodeName:
inboundPayload = sparkplug_b_pb2.Payload()
inboundPayload.ParseFromString(msg.payload)
for metric in inboundPayload.metrics:
if metric.name == "Node Control/Next Server" or metric.alias == AliasMap.Next_Server:
# 'Node Control/Next Server' is an NCMD used to tell the device/client application to
# disconnect from the current MQTT server and connect to the next MQTT server in the
# list of available servers. This is used for clients that have a pool of MQTT servers
# to connect to.
print ("'Node Control/Next Server' is not implemented in this example")
elif metric.name == "Node Control/Rebirth" or metric.alias == AliasMap.Rebirth:
# 'Node Control/Rebirth' is an NCMD used to tell the device/client application to resend
# its full NBIRTH and DBIRTH again. MQTT Engine will send this NCMD to a device/client
# application if it receives an NDATA or DDATA with a metric that was not published in the
# original NBIRTH or DBIRTH. This is why the application must send all known metrics in
# its original NBIRTH and DBIRTH messages.
publishBirth()
elif metric.name == "Node Control/Reboot" or metric.alias == AliasMap.Reboot:
# 'Node Control/Reboot' is an NCMD used to tell a device/client application to reboot
# This can be used for devices that need a full application reset via a soft reboot.
# In this case, we fake a full reboot with a republishing of the NBIRTH and DBIRTH
# messages.
publishBirth()
elif metric.name == "output/Device Metric2" or metric.alias == AliasMap.Device_Metric2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue = metric.int_value
print ("CMD message for output/Device Metric2 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric2, MetricDataType.Int16, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 1
#publishBirth()
elif metric.name == "output/Device Input1" or metric.alias == AliasMap.Device_Input1:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue1 = metric.int_value
print ("CMD message for output/Device Input1 - New Value: {}".format(newValue1))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input1, MetricDataType.Int16, newValue1)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 2
#publishBirth()
elif metric.name == "output/Device Input2" or metric.alias == AliasMap.Device_Input2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue2 = metric.int_value
print ("CMD message for output/Device Input2 - New Value: {}".format(newValue2))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input2, MetricDataType.Int16, newValue2)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 3
#publishBirth()
elif metric.name == "output/Device Input3" or metric.alias == AliasMap.Device_Input3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue3 = metric.int_value
print ("CMD message for output/Device Input3 - New Value: {}".format(newValue3))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input3, MetricDataType.Int16, newValue3)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 4
#publishBirth()
elif metric.name == "output/Device Input4" or metric.alias == AliasMap.Device_Input4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue4 = metric.int_value
print ("CMD message for output/Device Input4 - New Value: {}".format(newValue4))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input4, MetricDataType.Int16, newValue4)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 5
#publishBirth()
elif metric.name == "output/Device Input5" or metric.alias == AliasMap.Device_Input5:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue5 = metric.int_value
print ("CMD message for output/Device Input5 - New Value: {}".format(newValue5))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input5, MetricDataType.Int16, newValue5)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 6
#publishBirth()
elif metric.name == "output/Device Input6" or metric.alias == AliasMap.Device_Input6:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue6 = metric.int_value
print ("CMD message for output/Device Input6 - New Value: {}".format(newValue6))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input6, MetricDataType.Int16, newValue6)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 7
#publishBirth()
elif metric.name == "output/Device Input7" or metric.alias == AliasMap.Device_Input7:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue7 = metric.int_value
print ("CMD message for output/Device Input7 - New Value: {}".format(newValue7))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input7, MetricDataType.Int16, newValue7)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 8
#publishBirth()
elif metric.name == "output/Device Input8" or metric.alias == AliasMap.Device_Input8:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue8 = metric.int_value
print ("CMD message for output/Device Input8 - New Value: {}".format(newValue8))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input8, MetricDataType.Int16, newValue8)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 9
#publishBirth()
elif metric.name == "output/Device Input9" or metric.alias == AliasMap.Device_Input9:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue9 = metric.int_value
print ("CMD message for output/Device Input9 - New Value: {}".format(newValue9))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input9, MetricDataType.Int16, newValue9)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Handle writes to output/Device Input10
#publishBirth()
elif metric.name == "output/Device Input10" or metric.alias == AliasMap.Device_Input10:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue10 = metric.int_value
print ("CMD message for output/Device Input10 - New Value: {}".format(newValue10))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input10, MetricDataType.Int16, newValue10)
# Publish the DDATA message
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#global newValue4
#publishBirth()
elif metric.name == "output/Device Metric4" or metric.alias == AliasMap.Device_Metric4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declared it in the DBIRTH
newValue = metric.string_value
print ("CMD message for output/Device Metric4 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric4, MetricDataType.String, newValue)
# Publish the DDATA message
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#publishBirth()
elif metric.name == "output/Device Metric3" or metric.alias == AliasMap.Device_Metric3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is a Boolean because of how we declared it in the DBIRTH
newValue = metric.boolean_value
print ("CMD message for output/Device Metric3 - New Value: %r" % newValue)
# Create the DDATA payload - use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric3, MetricDataType.Boolean, newValue)
# Publish the DDATA message
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
else:
print ("Unknown command: " + metric.name)
else:
print ("Unknown command...")
print ("Done publishing")
#####################################################################
######################################################################
######################################################################
# Publish the BIRTH certificates
######################################################################
def publishBirth():
publishNodeBirth()
publishDeviceBirth()
######################################################################
######################################################################
# Publish the NBIRTH certificate
######################################################################
def publishNodeBirth():
print ("Publishing Node Birth")
# Create the node birth payload
payload = sparkplug.getNodeBirthPayload()
# Set up the Node Controls
addMetric(payload, "Node Control/Next Server", AliasMap.Next_Server, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Rebirth", AliasMap.Rebirth, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Reboot", AliasMap.Reboot, MetricDataType.Boolean, False)
# Publish the node birth certificate
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/NBIRTH/" + myNodeName, byteArray, 0, False)
######################################################################
######################################################################
# Publish the DBIRTH certificate
######################################################################
def publishDeviceBirth():
print ("Publishing Device Birth")
# Get the payload
payload = sparkplug.getDeviceBirthPayload()
# Add some device metrics
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload, "input/Device Metric0", AliasMap.Device_Metric0, MetricDataType.String, "hello device")
addMetric(payload, "input/Device Metric1", AliasMap.Device_Metric1, MetricDataType.Boolean, True)
addMetric(payload, "input/Number of Objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "output/Device Metric2", AliasMap.Device_Metric2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input1", AliasMap.Device_Input1, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input2", AliasMap.Device_Input2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input3", AliasMap.Device_Input3, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input4", AliasMap.Device_Input4, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input5", AliasMap.Device_Input5, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input6", AliasMap.Device_Input6, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input7", AliasMap.Device_Input7, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input8", AliasMap.Device_Input8, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input9", AliasMap.Device_Input9, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input10", AliasMap.Device_Input10, MetricDataType.Int16, 0)
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Metric3", AliasMap.Device_Metric3, MetricDataType.Boolean, True)
addMetric(payload, "output/Device Metric4", AliasMap.Device_Metric4, MetricDataType.String, "start")
# Publish the initial data with the Device BIRTH certificate
totalByteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DBIRTH/" + myNodeName + "/" + myDeviceName, totalByteArray, 0, False)
######################################################################
######################################################################
def osd_sink_pad_buffer_probe(pad,info,u_data):
global frame_numberx
global num_rectsx
global Object1
global Object2
global Object3
global Object4
global Object5
global Object6
global Object7
global Object8
global Object9
global Object10
# Initializing object counter with 0.
obj_counter = {
PGIE_CLASS_ID_TOOTHBRUSH:0,
PGIE_CLASS_ID_HAIR_DRYER:0,
PGIE_CLASS_ID_TEDDY_BEAR:0,
PGIE_CLASS_ID_SCISSORS:0,
PGIE_CLASS_ID_VASE:0,
PGIE_CLASS_ID_CLOCK:0,
PGIE_CLASS_ID_BOOK:0,
PGIE_CLASS_ID_REFRIGERATOR:0,
PGIE_CLASS_ID_SINK:0,
PGIE_CLASS_ID_TOASTER:0,
PGIE_CLASS_ID_OVEN:0,
PGIE_CLASS_ID_MICROWAVE:0,
PGIE_CLASS_ID_CELL_PHONE:0,
PGIE_CLASS_ID_KEYBOARD:0,
PGIE_CLASS_ID_REMOTE:0,
PGIE_CLASS_ID_MOUSE:0,
PGIE_CLASS_ID_LAPTOP:0,
PGIE_CLASS_ID_TVMONITOR:0,
PGIE_CLASS_ID_TOILET:0,
PGIE_CLASS_ID_DININGTABLE:0,
PGIE_CLASS_ID_BED:0,
PGIE_CLASS_ID_POTTEDPLANT:0,
PGIE_CLASS_ID_SOFA:0,
PGIE_CLASS_ID_CHAIR:0,
PGIE_CLASS_ID_CAKE:0,
PGIE_CLASS_ID_DONUT:0,
PGIE_CLASS_ID_PIZZA:0,
PGIE_CLASS_ID_HOT_DOG:0,
PGIE_CLASS_ID_CARROT:0,
PGIE_CLASS_ID_BROCCOLI:0,
PGIE_CLASS_ID_ORANGE:0,
PGIE_CLASS_ID_SANDWICH:0,
PGIE_CLASS_ID_APPLE:0,
PGIE_CLASS_ID_BANANA:0,
PGIE_CLASS_ID_BOWL:0,
PGIE_CLASS_ID_SPOON:0,
PGIE_CLASS_ID_KNIFE:0,
PGIE_CLASS_ID_FORK:0,
PGIE_CLASS_ID_CUP:0,
PGIE_CLASS_ID_WINE_GLASS:0,
PGIE_CLASS_ID_BOTTLE:0,
PGIE_CLASS_ID_TENNIS_RACKET:0,
PGIE_CLASS_ID_SURFBOARD:0,
PGIE_CLASS_ID_SKATEBOARD:0,
PGIE_CLASS_ID_BASEBALL_GLOVE:0,
PGIE_CLASS_ID_BASEBALL_BAT:0,
PGIE_CLASS_ID_KITE:0,
PGIE_CLASS_ID_SPORTS_BALL:0,
PGIE_CLASS_ID_SNOWBOARD:0,
PGIE_CLASS_ID_SKIS:0,
PGIE_CLASS_ID_FRISBEE:0,
PGIE_CLASS_ID_SUITCASE:0,
PGIE_CLASS_ID_TIE:0,
PGIE_CLASS_ID_HANDBAG:0,
PGIE_CLASS_ID_UMBRELLA:0,
PGIE_CLASS_ID_BACKPACK:0,
PGIE_CLASS_ID_GIRAFFE:0,
PGIE_CLASS_ID_ZEBRA:0,
PGIE_CLASS_ID_BEAR:0,
PGIE_CLASS_ID_ELEPHANT:0,
PGIE_CLASS_ID_COW:0,
PGIE_CLASS_ID_SHEEP:0,
PGIE_CLASS_ID_HORSE:0,
PGIE_CLASS_ID_DOG:0,
PGIE_CLASS_ID_CAT:0,
PGIE_CLASS_ID_BIRD:0,
PGIE_CLASS_ID_BENCH:0,
PGIE_CLASS_ID_PARKING_METER:0,
PGIE_CLASS_ID_STOP_SIGN:0,
PGIE_CLASS_ID_FIRE_HYDRANT:0,
PGIE_CLASS_ID_TRAFFIC_LIGHT:0,
PGIE_CLASS_ID_BOAT:0,
PGIE_CLASS_ID_TRUCK:0,
PGIE_CLASS_ID_TRAIN:0,
PGIE_CLASS_ID_BUS:0,
PGIE_CLASS_ID_AEROPLANE:0,
PGIE_CLASS_ID_MOTORBIKE:0,
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_PERSON:0
}
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
frame_numberx=frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
num_rectsx = frame_meta.num_obj_meta
l_obj=frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
try:
l_obj=l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Cup_count={} Bottle_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_CUP], obj_counter[PGIE_CLASS_ID_BOTTLE])
Object1 = obj_counter[newValue1]
Object2 = obj_counter[newValue2]
Object3 = obj_counter[newValue3]
Object4 = obj_counter[newValue4]
Object5 = obj_counter[newValue5]
Object6 = obj_counter[newValue6]
Object7 = obj_counter[newValue7]
Object8 = obj_counter[newValue8]
Object9 = obj_counter[newValue9]
Object10 = obj_counter[newValue10]
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font, font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
# print(pyds.get_string(py_nvosd_text_params.display_text))
#pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
######################################################################
def main(args):
# Check input arguments
if len(args) != 2:
sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
sys.exit(1)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
# Source element for reading from the file
print("Creating Source \n ")
source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
if not caps_v4l2src:
sys.stderr.write(" Unable to create v4l2src capsfilter \n")
print("Creating Video Converter \n")
# Adding videoconvert -> nvvideoconvert because not all
# raw formats are supported by nvvideoconvert; for example
# YUYV, the common raw format for many Logitech USB cams,
# is unsupported.
# If the camera's raw format is supported by nvvideoconvert,
# GStreamer capability negotiation should be smart enough to
# make videoconvert a passthrough and save compute
# (TODO: we need to confirm this)
# videoconvert to make sure a superset of raw formats are supported
vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
if not vidconvsrc:
sys.stderr.write(" Unable to create videoconvert \n")
# nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
if not nvvidconvsrc:
sys.stderr.write(" Unable to create Nvvideoconvert \n")
caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_vidconvsrc:
sys.stderr.write(" Unable to create capsfilter \n")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on camera's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
# Use convertor to convert from NV12 to RGBA as required by nvosd
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
# Create OSD to draw on the converted RGBA buffer
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
# Finally render the osd output
if is_aarch64():
transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
if not sink:
sys.stderr.write(" Unable to create egl sink \n")
print("Playing cam %s " %args[1])
caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
source.set_property('device', args[1])
streammux.set_property('width', 640)
streammux.set_property('height', 480)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', "config_infer_primary_yoloV3.txt")
# Set sync = false to avoid late frame drops at the display-sink
sink.set_property('sync', False)
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(caps_v4l2src)
pipeline.add(vidconvsrc)
pipeline.add(nvvidconvsrc)
pipeline.add(caps_vidconvsrc)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)
if is_aarch64():
pipeline.add(transform)
# we link the elements together
# v4l2src -> nvvideoconvert -> mux ->
# nvinfer -> nvvideoconvert -> nvosd -> video-renderer
print("Linking elements in the Pipeline \n")
source.link(caps_v4l2src)
caps_v4l2src.link(vidconvsrc)
vidconvsrc.link(nvvidconvsrc)
nvvidconvsrc.link(caps_vidconvsrc)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = caps_vidconvsrc.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
if is_aarch64():
nvosd.link(transform)
transform.link(sink)
else:
nvosd.link(sink)
# create an event loop and feed gstreamer bus messages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
# Let's add a probe to get informed of the generated metadata. We add the probe
# to the sink pad of the osd element since, by that time, the buffer will have
# acquired all the metadata.
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
######################################################################
# Create the node death payload
deathPayload = sparkplug.getNodeDeathPayload()
# Start of main program - Set up the MQTT client connection
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(myUsername, myPassword)
deathByteArray = bytearray(deathPayload.SerializeToString())
client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName, deathByteArray, 0, False)
client.connect(serverUrl, 1883, 60)
# Publish the birth certificates
publishBirth()
def foo():
# Periodically publish some new data
payload = sparkplug.getDdataPayload()
# Add the current frame number and object counts to the inputs
addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, Object1)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, Object2)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, Object3)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, Object4)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, Object5)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, Object6)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, Object7)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, Object8)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, Object9)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, Object10)
# Publish the DDATA message
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Sit and wait for inbound or outbound events
for _ in range(1):
time.sleep(1)
client.loop()
threading.Timer(WAIT_SECONDS, foo).start()
foo()
######################################################################
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
#cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
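To observe what this node emits, a separate Sparkplug topic listener can be run alongside it. The sketch below is illustrative only; the broker host and group id are assumptions that must match serverUrl and myGroupId in the script above.

# Minimal Sparkplug B topic listener (sketch, not part of the sample above).
# Assumes paho-mqtt is installed; BROKER and GROUP_ID are assumptions.
import paho.mqtt.client as mqtt

BROKER = "localhost"                  # assumption: same host as serverUrl
GROUP_ID = "Sparkplug B Devices"      # assumption: must equal myGroupId

def on_message(client, userdata, msg):
    # Payloads are protobuf-encoded Sparkplug B messages; print topic and size only.
    print(msg.topic, len(msg.payload), "bytes")

listener = mqtt.Client()
listener.on_message = on_message
listener.connect(BROKER, 1883, 60)
listener.subscribe("spBv1.0/" + GROUP_ID + "/#")
listener.loop_forever()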
| 46.410987
| 849
| 0.645829
| 5,676
| 45,622
| 5.037703
| 0.141473
| 0.039903
| 0.062321
| 0.033154
| 0.495524
| 0.41827
| 0.38585
| 0.351193
| 0.345632
| 0.326467
| 0
| 0.026393
| 0.242602
| 45,622
| 982
| 850
| 46.458248
| 0.801123
| 0.260883
| 0
| 0.109855
| 0
| 0
| 0.134868
| 0.002756
| 0
| 0
| 0
| 0.001018
| 0
| 1
| 0.012924
| false
| 0.004847
| 0.021002
| 0
| 0.090469
| 0.051696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a9fcd8ecc089595a2ffc3a48b4ee67000ac218d
| 799
|
py
|
Python
|
src/pyams_i18n/tests/__init__.py
|
Py-AMS/pyams-i18n
|
dbb3953302311977653145385af02e4d1ae41431
|
[
"ZPL-2.1"
] | null | null | null |
src/pyams_i18n/tests/__init__.py
|
Py-AMS/pyams-i18n
|
dbb3953302311977653145385af02e4d1ae41431
|
[
"ZPL-2.1"
] | null | null | null |
src/pyams_i18n/tests/__init__.py
|
Py-AMS/pyams-i18n
|
dbb3953302311977653145385af02e4d1ae41431
|
[
"ZPL-2.1"
] | null | null | null |
#
# Copyright (c) 2015-2019 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""
Generic test cases for pyams_i18n doctests
"""
__docformat__ = 'restructuredtext'
import os
import sys
def get_package_dir(value):
"""Get package directory"""
package_dir = os.path.split(value)[0]
if package_dir not in sys.path:
sys.path.append(package_dir)
return package_dir
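As a usage sketch (not part of the package), the helper above would typically be called with the module's own path:

# Hypothetical usage of get_package_dir: add this file's parent directory to sys.path.
if __name__ == '__main__':
    print(get_package_dir(__file__))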
| 26.633333
| 75
| 0.740926
| 116
| 799
| 5.008621
| 0.681034
| 0.086059
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019817
| 0.178974
| 799
| 29
| 76
| 27.551724
| 0.865854
| 0.664581
| 0
| 0
| 0
| 0
| 0.065306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa02482ee4345f8d62c98b8785e029ed85945dd
| 1,639
|
py
|
Python
|
tqsdk/demo/example/momentum.py
|
boyscout2008/tqsdk-python
|
79496a938a44f79ea9164569637509d0cc7db70a
|
[
"Apache-2.0"
] | null | null | null |
tqsdk/demo/example/momentum.py
|
boyscout2008/tqsdk-python
|
79496a938a44f79ea9164569637509d0cc7db70a
|
[
"Apache-2.0"
] | null | null | null |
tqsdk/demo/example/momentum.py
|
boyscout2008/tqsdk-python
|
79496a938a44f79ea9164569637509d0cc7db70a
|
[
"Apache-2.0"
] | 1
|
2020-11-20T01:19:11.000Z
|
2020-11-20T01:19:11.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Ringo"
'''
Price momentum strategy (difficulty: beginner)
Reference: https://www.shinnytech.com/blog/momentum-strategy/
Note: this example strategy is only a feature demonstration; adapt it to your own strategy/experience before live trading
'''
from tqsdk import TqAccount, TqApi, TargetPosTask
# Select the contract and fetch N K-lines to compute the price momentum
SYMBOL = "SHFE.au1912"
N = 15
api = TqApi()
klines = api.get_kline_serial(SYMBOL, 60*60*24, N)
quote = api.get_quote(SYMBOL)
target_pos = TargetPosTask(api, SYMBOL)
position = api.get_position(SYMBOL)
# Price momentum function AR, computed over the previous N-1 daily K-lines
def AR(kline1):
spread_ho = sum(kline1.high[:-1] - kline1.open[:-1])
spread_oc = sum(kline1.open[:-1] - kline1.low[:-1])
# When spread_oc is 0, fall back to the minimum price tick
if spread_oc == 0:
spread_oc = quote.price_tick
ar = (spread_ho/spread_oc)*100
return ar
ar = AR(klines)
print("Strategy started")
while True:
api.wait_update()
# When a new K-line is generated, recompute the price momentum ar
if api.is_changing(klines.iloc[-1], "datetime"):
ar = AR(klines)
print("Price momentum:", ar)
# Re-evaluate whenever the last price changes
if api.is_changing(quote, "last_price"):
# Entry logic
if position.pos_long == 0 and position.pos_short == 0:
# If ar is above 110 and below 150, open a long position
if 110 < ar < 150:
print("Momentum above 110 and below 150, going long")
target_pos.set_target_volume(100)
# If ar is above 50 and below 90, open a short position
elif 50 < ar < 90:
print("Momentum above 50 and below 90, going short")
target_pos.set_target_volume(-100)
# Stop loss: close longs when ar falls below 90, close shorts when ar rises above 110
elif (position.pos_long > 0 and ar < 90) or (position.pos_short > 0 and ar > 110):
print("Stop loss, closing position")
target_pos.set_target_volume(0)
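To make the AR formula concrete, here is a tiny hand-checked computation with made-up bar values (illustrative only, not market data):

# Illustrative AR computation on three synthetic bars.
# ar = 100 * sum(high - open) / sum(open - low), taken over the first N-1 bars.
high = [105.0, 108.0, 107.0]
open_ = [100.0, 104.0, 106.0]
low = [98.0, 103.0, 104.0]
spread_ho = sum(h - o for h, o in zip(high[:-1], open_[:-1]))  # (105-100) + (108-104) = 9
spread_oc = sum(o - l for o, l in zip(open_[:-1], low[:-1]))   # (100-98) + (104-103) = 3
print(100 * spread_ho / spread_oc)                             # 300.0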
| 26.015873
| 90
| 0.621721
| 216
| 1,639
| 4.560185
| 0.523148
| 0.040609
| 0.036548
| 0.054822
| 0.117767
| 0.054822
| 0
| 0
| 0
| 0
| 0
| 0.064673
| 0.245272
| 1,639
| 62
| 91
| 26.435484
| 0.731609
| 0.145821
| 0
| 0.058824
| 0
| 0
| 0.065882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.088235
| 0.147059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa1d7c9f54267d6e42717a153600f7e111a7f9f
| 10,323
|
py
|
Python
|
color_transfer/__init__.py
|
AdamSpannbauer/color_transfer
|
155e0134615f35bf19bf32f4cacf056603604914
|
[
"MIT"
] | null | null | null |
color_transfer/__init__.py
|
AdamSpannbauer/color_transfer
|
155e0134615f35bf19bf32f4cacf056603604914
|
[
"MIT"
] | null | null | null |
color_transfer/__init__.py
|
AdamSpannbauer/color_transfer
|
155e0134615f35bf19bf32f4cacf056603604914
|
[
"MIT"
] | 1
|
2020-11-05T17:35:14.000Z
|
2020-11-05T17:35:14.000Z
|
# import the necessary packages
import numpy as np
import cv2
import imutils
def color_transfer(source, target, clip=True, preserve_paper=True):
"""
Transfers the color distribution from the source to the target
image using the mean and standard deviations of the L*a*b*
color space.
This implementation is (loosely) based on the "Color Transfer
between Images" paper by Reinhard et al., 2001.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
clip: Should components of L*a*b* image be scaled by np.clip before
converting back to BGR color space?
If False then components will be min-max scaled appropriately.
Clipping will keep target image brightness truer to the input.
Scaling will adjust image brightness to avoid washed out portions
in the resulting color transfer that can be caused by clipping.
preserve_paper: Should color transfer strictly follow methodology
laid out in original paper? The method does not always produce
aesthetically pleasing results.
If False then L*a*b* components will be scaled using the reciprocal of
the scaling factor proposed in the paper. This method seems to produce
more consistently aesthetically pleasing results
Returns:
-------
transfer: NumPy array
OpenCV image (w, h, 3) NumPy array (uint8)
"""
# convert the images from the BGR to L*a*b* color space, being
# sure to utilize the floating point data type (note: OpenCV
# expects floats to be 32-bit, so use that instead of 64-bit)
source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
# compute color statistics for the source and target images
(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)
# subtract the means from the target image
(l, a, b) = cv2.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
if preserve_paper:
# scale by the standard deviations using paper proposed factor
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
else:
# scale by the standard deviations using reciprocal of paper proposed factor
l = (lStdSrc / lStdTar) * l
a = (aStdSrc / aStdTar) * a
b = (bStdSrc / bStdTar) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip/scale the pixel intensities to [0, 255] if they fall
# outside this range
l = _scale_array(l, clip=clip)
a = _scale_array(a, clip=clip)
b = _scale_array(b, clip=clip)
# merge the channels together and convert back to the BGR color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv2.merge([l, a, b])
transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
# return the color transferred image
return transfer
def auto_color_transfer(source, target):
"""Pick color_transfer result truest to source image color
Applies color_transfer with all possible combinations of the clip & preserve_paper arguments.
A chi-squared distance between HSV color histograms of each result and the source image is computed.
The best_result that minimizes this distance is returned, as well as a montage of all candidate results.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
Returns:
-------
tuple: (best_result, comparison)
best_result: NumPy array
result that minimizes mean absolute error between compared to source image in HSV color space
comparison: NumPy array
image showing the results of all combinations of color_transfer options
"""
# get mean HSV stats from source image for comparison
hsv_source = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
hsv_hist_src = cv2.calcHist([hsv_source], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# iterate through all 4 options for toggling color transfer
bools = [True, False]
candidates = []
best_result = None
best_dist = float('inf')
for clip in bools:
for preserve_paper in bools:
# create candidate image from options of this iteration
candidate = color_transfer(source, target, clip, preserve_paper)
# get mean HSV stats from candidate image for comparison
hsv_candidate = cv2.cvtColor(candidate, cv2.COLOR_BGR2HSV)
hsv_hist_cand = cv2.calcHist([hsv_candidate], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# calc chi square dist
chi2_dist = chi2_distance(hsv_hist_src, hsv_hist_cand)
# keep the candidate whose histogram distance to the source is the smallest so far
if chi2_dist < best_dist:
best_dist = chi2_dist
best_result = candidate[:]
candidates.append(candidate)
# build 2 by 2 image matrix of all candidates for comparison
comparison = np.hstack((np.vstack(candidates[:2]),
np.vstack(candidates[2:])))
# add border annotations showing values of params for each output
comparison = _bool_matrix_border(comparison)
return best_result, comparison
def chi2_distance(hist_a, hist_b, eps=1e-10):
return 0.5 * np.sum(((hist_a - hist_b) ** 2) / (hist_a + hist_b + eps))
def _bool_matrix_border(comparison_image):
"""Apply table formatting for comparison of color_transfer options
Parameters:
-------
target: NumPy array
OpenCV image in BGR color space (the comparison image produced in auto_color_transfer)
Returns:
-------
comparison: NumPy array
OpenCV image in BGR color space with borders applied to easily compare the different
results of the auto_color_transfer
"""
# 200 seems to work well as border size
border_size = 200
# put black border on top and left of input image
h, w = comparison_image.shape[:2]
top = np.zeros(w * border_size, dtype='uint8').reshape(border_size, w)
left = np.zeros((h + border_size) * border_size, dtype='uint8').reshape(h + border_size, border_size)
top = cv2.cvtColor(top, cv2.COLOR_GRAY2BGR)
left = cv2.cvtColor(left, cv2.COLOR_GRAY2BGR)
bordered_comparison_image = np.vstack((top, comparison_image))
bordered_comparison_image = np.hstack((left, bordered_comparison_image))
# add text for clip arg options to top border
top_title_loc = (border_size, 75)
top_true_loc = (border_size, 190)
top_false_loc = (int(border_size + w / 2), 190)
cv2.putText(bordered_comparison_image, 'Clip', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate 90 degrees for writing text to left border
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, 90)
# add text for preserve paper arg options to left border
top_title_loc = (5, 75)
top_true_loc = (5 + int(h / 2), 190)
top_false_loc = (5, 190)
cv2.putText(bordered_comparison_image, 'Preserve Paper', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate -90 degrees to return image in correct orientation
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, -90)
return bordered_comparison_image
def image_stats(image):
"""
Parameters:
-------
image: NumPy array
OpenCV image in L*a*b* color space
Returns:
-------
Tuple of mean and standard deviations for the L*, a*, and b*
channels, respectively
"""
# compute the mean and standard deviation of each channel
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return lMean, lStd, aMean, aStd, bMean, bStd
def _min_max_scale(arr, new_range=(0, 255)):
"""
Perform min-max scaling to a NumPy array
Parameters:
-------
arr: NumPy array to be scaled to [new_min, new_max] range
new_range: tuple of form (min, max) specifying range of
transformed array
Returns:
-------
NumPy array that has been scaled to be in
[new_range[0], new_range[1]] range
"""
# get array's current min and max
mn = arr.min()
mx = arr.max()
# check if scaling needs to be done to be in new_range
if mn < new_range[0] or mx > new_range[1]:
# perform min-max scaling
scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]
else:
# return array if already in range
scaled = arr
return scaled
def _scale_array(arr, clip=True):
"""
Trim NumPy array values to be in [0, 255] range with option of
clipping or scaling.
Parameters:
-------
arr: array to be trimmed to [0, 255] range
clip: should array be scaled by np.clip? if False then input
array will be min-max scaled to range
[max([arr.min(), 0]), min([arr.max(), 255])]
Returns:
-------
NumPy array that has been scaled to be in [0, 255] range
"""
if clip:
scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))
scaled = _min_max_scale(arr, new_range=scale_range)
return scaled
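A minimal end-to-end use of the functions above could look like the following; the image paths are placeholders:

# Sketch: apply the color transfer to two images on disk (paths are placeholders).
import cv2
from color_transfer import color_transfer, auto_color_transfer

source = cv2.imread("source.jpg")   # image whose color distribution we want to borrow
target = cv2.imread("target.jpg")   # image to be recolored

result = color_transfer(source, target, clip=True, preserve_paper=True)
best, comparison = auto_color_transfer(source, target)

cv2.imwrite("result.jpg", result)
cv2.imwrite("comparison.jpg", comparison)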
| 36.477032
| 105
| 0.657173
| 1,447
| 10,323
| 4.569454
| 0.213545
| 0.040835
| 0.048699
| 0.025408
| 0.277374
| 0.202511
| 0.161978
| 0.161978
| 0.156534
| 0.148367
| 0
| 0.032266
| 0.252446
| 10,323
| 282
| 106
| 36.606383
| 0.824543
| 0.473215
| 0
| 0.163462
| 0
| 0
| 0.013559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067308
| false
| 0
| 0.028846
| 0.009615
| 0.163462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa21222a53d441e6c157bad6965004f0771b6e4
| 250
|
py
|
Python
|
Python/Tree/TestCreateTreeLibraryImport.py
|
zseen/hackerrank-challenges
|
c154f039f58073ee3d94d012462c7055e68784b2
|
[
"MIT"
] | null | null | null |
Python/Tree/TestCreateTreeLibraryImport.py
|
zseen/hackerrank-challenges
|
c154f039f58073ee3d94d012462c7055e68784b2
|
[
"MIT"
] | null | null | null |
Python/Tree/TestCreateTreeLibraryImport.py
|
zseen/hackerrank-challenges
|
c154f039f58073ee3d94d012462c7055e68784b2
|
[
"MIT"
] | null | null | null |
from Library.CreateATree import CreateATree
tree = CreateATree.BinarySearchTree()
nodesList = [4, 5, 1, 3, 2]
for node in nodesList:
tree.insert(node)
#tree.printInorder()
tree.printPreorder()
#tree.printPostorder()
| 19.230769
| 43
| 0.732
| 32
| 250
| 5.71875
| 0.71875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027397
| 0.124
| 250
| 12
| 44
| 20.833333
| 0.808219
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa25f7e4d64679c81ca1e60dffb6ddf922f9c4c
| 522
|
py
|
Python
|
application/siteApp/urls.py
|
Marcelotsvaz/vaz-projects
|
8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4
|
[
"Unlicense"
] | null | null | null |
application/siteApp/urls.py
|
Marcelotsvaz/vaz-projects
|
8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4
|
[
"Unlicense"
] | null | null | null |
application/siteApp/urls.py
|
Marcelotsvaz/vaz-projects
|
8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4
|
[
"Unlicense"
] | null | null | null |
#
# VAZ Projects
#
#
# Author: Marcelo Tellier Sartori Vaz <[email protected]>
from django.urls import path
from . import views
app_name = 'siteApp'
urlpatterns = [
path( '', views.Home.as_view(), name = 'home' ),
path( 'about-me', views.About_me.as_view(), name = 'about_me' ),
path( 'search', views.Search.as_view(), name = 'search' ),
path( 'search/page/<int:page>', views.Search.as_view(), name = 'search' ),
path( 'sitemap.xml', views.Sitemap.as_view(), name = 'sitemap' ),
]
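Given this URLconf, the named routes can be resolved with Django's reverse(); the snippet below is an illustrative sketch (e.g. from a Django shell) and assumes the app is included at the site root:

# Sketch: resolving the named routes declared above (app_name = 'siteApp').
from django.urls import reverse

reverse('siteApp:home')                         # '/'
reverse('siteApp:about_me')                     # '/about-me'
reverse('siteApp:search', kwargs={'page': 2})   # '/search/page/2'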
| 23.727273
| 77
| 0.628352
| 68
| 522
| 4.705882
| 0.426471
| 0.09375
| 0.15625
| 0.10625
| 0.19375
| 0.19375
| 0.19375
| 0
| 0
| 0
| 0
| 0
| 0.189655
| 522
| 22
| 78
| 23.727273
| 0.756501
| 0.143678
| 0
| 0
| 0
| 0
| 0.192744
| 0.049887
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa2cafc9ca0f9283336142e3b81fea44a3587b3
| 1,286
|
py
|
Python
|
Classes/ServiceBase.py
|
tkeske/SMS-Fetcher
|
7b3ec0ea4517ad11164b8e2d7ee2c60d2a9f0ed2
|
[
"BSD-3-Clause"
] | null | null | null |
Classes/ServiceBase.py
|
tkeske/SMS-Fetcher
|
7b3ec0ea4517ad11164b8e2d7ee2c60d2a9f0ed2
|
[
"BSD-3-Clause"
] | null | null | null |
Classes/ServiceBase.py
|
tkeske/SMS-Fetcher
|
7b3ec0ea4517ad11164b8e2d7ee2c60d2a9f0ed2
|
[
"BSD-3-Clause"
] | null | null | null |
'''
@author Tomáš Keske
@since 10.8.2019
'''
import sys
from jnius import autoclass
from Conf.Conf import *
class ServiceBase():
def __init__(self):
PythonServiceClass = autoclass('org.kivy.android.PythonService')
self.Context = autoclass('android.content.Context')
self.Service = PythonServiceClass.mService
#set autorestart to be immune to task swiping on Android 9
self.Service.setAutoRestartService(True)
self.confDict = {k: v for k,v in globals().items() if k.isupper() and k.startswith("SMS")}
for k, v in self.confDict.items():
setattr(self, k, v)
def killGeneric(self, error):
print(repr(error))
self.Service.setAutoRestartService(False)
print("Autorestart of the service disabled.")
print("Attempting to kill service permanently.")
self.Service.stop()
#the service takes time to stop, so execution continues to the next block of code
#sys.exit() is needed to prevent subsequent code from executing
#both calls are necessary to avoid "Scheduling restart of crashed service process"
#in case we called only sys.exit()
#this applies even if we have setAutoRestartService(False)
print("Exiting python script")
sys.exit()
| 32.15
| 98
| 0.667963
| 159
| 1,286
| 5.377358
| 0.603774
| 0.009357
| 0.011696
| 0.016374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.241058
| 1,286
| 40
| 99
| 32.15
| 0.867828
| 0.301711
| 0
| 0
| 0
| 0
| 0.171558
| 0.059819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.3
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa359b860399eb8f9859835f9d9ac0f53b4de56
| 723
|
py
|
Python
|
api/queue/__init__.py
|
sofia008/api-redis-queue
|
8d65665c8a9f44990565baa8c7ba43d7f01425d3
|
[
"Apache-2.0"
] | null | null | null |
api/queue/__init__.py
|
sofia008/api-redis-queue
|
8d65665c8a9f44990565baa8c7ba43d7f01425d3
|
[
"Apache-2.0"
] | null | null | null |
api/queue/__init__.py
|
sofia008/api-redis-queue
|
8d65665c8a9f44990565baa8c7ba43d7f01425d3
|
[
"Apache-2.0"
] | null | null | null |
# api/queue/__init__.py
import os
from flask import Flask
from flask_bootstrap import Bootstrap
# instantiate the extensions
bootstrap = Bootstrap()
def create_app(script_info=None):
# instantiate the app
app = Flask(
__name__,
template_folder="../client/templates",
static_folder="../client/static",
)
# set config
app_settings = os.getenv("APP_SETTINGS")
app.config.from_object(app_settings)
# set up extensions
bootstrap.init_app(app)
# register blueprints
from api.queue.push.views import main_blueprint
app.register_blueprint(main_blueprint)
# shell context for flask cli
app.shell_context_processor({"app": app})
return app
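One way to serve this factory locally is sketched below; the APP_SETTINGS value is a hypothetical config path, not taken from this repository:

# Sketch: run the app factory locally. Assumes APP_SETTINGS names an importable
# config object, e.g. "api.queue.config.DevelopmentConfig" (hypothetical path).
import os
os.environ.setdefault("APP_SETTINGS", "api.queue.config.DevelopmentConfig")

from api.queue import create_app

app = create_app()
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)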
| 19.026316
| 51
| 0.697095
| 89
| 723
| 5.404494
| 0.47191
| 0.037422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214385
| 723
| 37
| 52
| 19.540541
| 0.846831
| 0.200553
| 0
| 0
| 0
| 0
| 0.087719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.352941
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa447f55a379751c7664d4eb5818450b99462c4
| 2,183
|
py
|
Python
|
tests/test_engine.py
|
Foxboron/python-adblock
|
50b2ddba9f7b237b38c848c7d4a1637917444924
|
[
"Apache-2.0",
"MIT"
] | 35
|
2020-06-26T21:06:13.000Z
|
2022-03-19T10:50:35.000Z
|
tests/test_engine.py
|
Foxboron/python-adblock
|
50b2ddba9f7b237b38c848c7d4a1637917444924
|
[
"Apache-2.0",
"MIT"
] | 34
|
2020-04-27T02:59:40.000Z
|
2022-03-06T20:55:00.000Z
|
tests/test_engine.py
|
Foxboron/python-adblock
|
50b2ddba9f7b237b38c848c7d4a1637917444924
|
[
"Apache-2.0",
"MIT"
] | 6
|
2020-12-22T21:56:02.000Z
|
2022-02-16T02:13:21.000Z
|
import adblock
import pytest
SMALL_FILTER_LIST = """
||wikipedia.org^
||old.reddit.com^
||lobste.rs^
"""
def empty_engine():
return adblock.Engine(adblock.FilterSet())
def test_engine_creation_and_blocking():
filter_set = adblock.FilterSet(debug=True)
filter_set.add_filter_list(SMALL_FILTER_LIST)
engine = adblock.Engine(filter_set=filter_set)
blocker_result_wikipedia = engine.check_network_urls(
url="https://wikipedia.org/img.png",
source_url="https://google.com/",
request_type="image",
)
assert isinstance(blocker_result_wikipedia, adblock.BlockerResult)
assert blocker_result_wikipedia.matched
blocker_result_facebook = engine.check_network_urls(
"https://facebook.com/directory/img.png",
"https://old.reddit.com/r/all",
"image",
)
assert isinstance(blocker_result_facebook, adblock.BlockerResult)
assert not blocker_result_facebook.matched
def test_serde_file(tmpdir):
path = str(tmpdir / "cache.dat")
engine0 = empty_engine()
with pytest.raises(FileNotFoundError):
# We haven't created the cache.dat file, so we should get an exception
# when attempting to deserialize.
engine0.deserialize_from_file(path)
engine1 = empty_engine()
serialization_result = engine1.serialize_to_file(path)
assert serialization_result is None
engine2 = empty_engine()
deserialization_result = engine2.deserialize_from_file(path)
assert deserialization_result is None
def test_deserialize_corrupt(tmpdir):
path = str(tmpdir / "corrupt_cache.dat")
with open(path, "w", encoding="utf-8") as f:
f.write("abc")
engine = empty_engine()
with pytest.raises(adblock.DeserializationError):
engine.deserialize_from_file(path)
with pytest.raises(adblock.DeserializationError):
engine.deserialize(b"abc")
def test_serde():
engine = empty_engine()
serialization_result = engine.serialize()
assert isinstance(serialization_result, bytes)
engine2 = empty_engine()
deserialization_result = engine2.deserialize(serialization_result)
assert deserialization_result is None
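Outside of pytest, the same engine API can be exercised directly; the sketch below mirrors the calls used in the tests, with a made-up filter and URLs:

# Sketch: block-list lookup with the adblock engine, mirroring the test calls above.
import adblock

filter_set = adblock.FilterSet()
filter_set.add_filter_list("||example.com^")
engine = adblock.Engine(filter_set=filter_set)
result = engine.check_network_urls(
    url="https://example.com/ad.js",
    source_url="https://news.site/",
    request_type="script",
)
print(result.matched)  # True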
| 29.106667
| 78
| 0.724233
| 259
| 2,183
| 5.864865
| 0.366795
| 0.050691
| 0.04345
| 0.045425
| 0.267281
| 0.154049
| 0.154049
| 0
| 0
| 0
| 0
| 0.005008
| 0.176821
| 2,183
| 74
| 79
| 29.5
| 0.840289
| 0.045809
| 0
| 0.148148
| 0
| 0
| 0.101442
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.092593
| false
| 0
| 0.037037
| 0.018519
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa55947380a65f7c24093ff3b3feee2ac3b5948
| 1,048
|
py
|
Python
|
data_structures/stack/largest_rectangle_area_in_histogram.py
|
ruler30cm/python-ds
|
f84605c5b746ea1d46de3d00b86f5fba399445c7
|
[
"MIT"
] | 1,723
|
2019-07-30T07:06:22.000Z
|
2022-03-31T15:22:22.000Z
|
data_structures/stack/largest_rectangle_area_in_histogram.py
|
ruler30cm/python-ds
|
f84605c5b746ea1d46de3d00b86f5fba399445c7
|
[
"MIT"
] | 213
|
2019-10-06T08:07:47.000Z
|
2021-10-04T15:38:36.000Z
|
data_structures/stack/largest_rectangle_area_in_histogram.py
|
ruler30cm/python-ds
|
f84605c5b746ea1d46de3d00b86f5fba399445c7
|
[
"MIT"
] | 628
|
2019-10-06T10:26:25.000Z
|
2022-03-31T01:41:00.000Z
|
'''
Largest rectangle area in a histogram:
Find the largest rectangular area possible in a given histogram, where the rectangle is made of a number of contiguous bars.
For simplicity, assume that all bars have the same width of 1 unit.
'''
def max_area_histogram(histogram):
stack = list()
max_area = 0 # Initialize max area
index = 0
while index < len(histogram):
if (not stack) or (histogram[stack[-1]] <= histogram[index]):
stack.append(index)
index += 1
else:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
while stack:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
return max_area
hist = [4, 7, 1, 8, 4, 9, 5]
print("Maximum area is",
max_area_histogram(hist))
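A couple of hand-checked cases make the expected result explicit; these asserts are illustrative additions, not part of the original snippet:

# Quick self-checks of max_area_histogram (values verified by hand).
assert max_area_histogram([4, 7, 1, 8, 4, 9, 5]) == 16   # bars 8, 4, 9, 5 at height 4
assert max_area_histogram([2, 2, 2]) == 6                 # whole histogram at height 2
assert max_area_histogram([5]) == 5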
| 28.324324
| 136
| 0.603053
| 147
| 1,048
| 4.176871
| 0.380952
| 0.102606
| 0.065147
| 0.04886
| 0.29316
| 0.29316
| 0.29316
| 0.29316
| 0.29316
| 0.29316
| 0
| 0.021651
| 0.294847
| 1,048
| 36
| 137
| 29.111111
| 0.809202
| 0.262405
| 0
| 0.3
| 0
| 0
| 0.019634
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.1
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa72ed7ab8eb40be3928ae652b97a0368992b42
| 2,389
|
py
|
Python
|
auth_backend/src/key_op.py
|
cispa/bitahoy
|
ffc2004930a033cfb94d13671bc6068b473ce226
|
[
"MIT"
] | null | null | null |
auth_backend/src/key_op.py
|
cispa/bitahoy
|
ffc2004930a033cfb94d13671bc6068b473ce226
|
[
"MIT"
] | null | null | null |
auth_backend/src/key_op.py
|
cispa/bitahoy
|
ffc2004930a033cfb94d13671bc6068b473ce226
|
[
"MIT"
] | 2
|
2021-12-30T16:48:15.000Z
|
2022-01-14T14:21:15.000Z
|
import sys
import os
import psycopg2
import base64
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.backends import default_backend
import time
if len(sys.argv) < 2:
print("Please enter either create or remove as a argv[1]")
sys.exit(0)
with psycopg2.connect("dbname='auth_db' user='auth_db' host='authdb' [redacted-2]") as conn:
with conn.cursor() as cursor:
if sys.argv[1] == "generate":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated!")
elif sys.argv[1] == "generate_if_needed":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
cursor.execute("SELECT * FROM key")
res = cursor.fetchall()
if len(res) == 0:
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated, as database was empty!")
else:
print("Database has key ready!")
elif sys.argv[1] == "drop":
cursor.execute("DROP TABLE key")
conn.commit()
print("Dropped old keys")
else:
print("Invalid option! Try 'drop', 'generate' or 'generate_if_needed'...")
| 45.942308
| 184
| 0.637505
| 286
| 2,389
| 5.251748
| 0.363636
| 0.051931
| 0.043941
| 0.04261
| 0.573901
| 0.573901
| 0.573901
| 0.573901
| 0.573901
| 0.573901
| 0
| 0.022124
| 0.243198
| 2,389
| 52
| 185
| 45.942308
| 0.808628
| 0.029301
| 0
| 0.342105
| 0
| 0
| 0.249784
| 0.009922
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.210526
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aa93bd0cfbc5bae7eaa0365dd95b7de863c0e17
| 653
|
py
|
Python
|
scripts/issue_param_value.py
|
Jhsmit/awesome-panel-extensions
|
41eba7cf84caa911be4ed0df2a96e16fc1e70263
|
[
"CC-BY-4.0"
] | 3
|
2020-07-16T07:28:45.000Z
|
2020-07-17T12:53:56.000Z
|
scripts/issue_param_value.py
|
MarcSkovMadsen/panel-extensions-template
|
f41ad8d8fb8502f87de3a4992917cbffb6299012
|
[
"CC-BY-4.0"
] | null | null | null |
scripts/issue_param_value.py
|
MarcSkovMadsen/panel-extensions-template
|
f41ad8d8fb8502f87de3a4992917cbffb6299012
|
[
"CC-BY-4.0"
] | null | null | null |
import panel as pn
import param
from awesome_panel_extensions.frameworks.fast import FastTemplate, FastTextInput
WIDGETS = {
"some_text": {"type": FastTextInput, "readonly": True, "sizing_mode": "fixed", "width": 400}
}
class ParameterizedApp(param.Parameterized):
some_text = param.String(default="This is some text")
view = param.Parameter()
def __init__(self, **params):
super().__init__(**params)
self.view = pn.Param(self, parameters=["some_text"], widgets=WIDGETS)
parameterized_app = ParameterizedApp()
parameterized_template = FastTemplate(main=[parameterized_app.view])
parameterized_template.servable()
| 27.208333
| 96
| 0.735069
| 73
| 653
| 6.328767
| 0.589041
| 0.069264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005338
| 0.139357
| 653
| 23
| 97
| 28.391304
| 0.816726
| 0
| 0
| 0
| 0
| 0
| 0.104135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aab6b3ba732d64220b4fb1bf6b4cc739254d1fe
| 1,019
|
py
|
Python
|
tests/pm/update_sla.py
|
supsi-dacd-isaac/parity-sidechain-interface
|
b64a5fb724955332afb4998344081d1b93ac216a
|
[
"MIT"
] | null | null | null |
tests/pm/update_sla.py
|
supsi-dacd-isaac/parity-sidechain-interface
|
b64a5fb724955332afb4998344081d1b93ac216a
|
[
"MIT"
] | null | null | null |
tests/pm/update_sla.py
|
supsi-dacd-isaac/parity-sidechain-interface
|
b64a5fb724955332afb4998344081d1b93ac216a
|
[
"MIT"
] | null | null | null |
# Importing section
import json
import requests
import argparse
import hashlib
import time
from http import HTTPStatus
# Main
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
args = arg_parser.parse_args()
set_cmd = 'updateSla'
params = {
'idx': 'sla04',
'start': 3000,
'end': 3900
}
cmd_url = 'http://localhost:9119/%s' % set_cmd
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
print('COMMAND: %s' % cmd_url)
print('PARAMS: %s' % params)
r = requests.post(cmd_url, headers=headers, json=params)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
# Wait some seconds to be sure that the transaction has been handled
time.sleep(5)
check_tx_url = 'http://localhost:9119/checkTx/%s' % data['tx_hash']
print('CHECK TX: %s' % check_tx_url)
r = requests.get(check_tx_url)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
| 24.261905
| 80
| 0.617272
| 132
| 1,019
| 4.590909
| 0.5
| 0.046205
| 0.049505
| 0.066007
| 0.122112
| 0.122112
| 0.122112
| 0.122112
| 0.122112
| 0.122112
| 0
| 0.02474
| 0.24632
| 1,019
| 41
| 81
| 24.853659
| 0.764323
| 0.088322
| 0
| 0.142857
| 0
| 0
| 0.223784
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0.178571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aac1f4d092634d65b03e7c6699787370a84bac7
| 498
|
py
|
Python
|
array/python3/5_move_all_negative_elements.py
|
jitendragangwar123/cp
|
8d9da1abd841784da8304e7ebb64a6b94cb804bb
|
[
"MIT"
] | null | null | null |
array/python3/5_move_all_negative_elements.py
|
jitendragangwar123/cp
|
8d9da1abd841784da8304e7ebb64a6b94cb804bb
|
[
"MIT"
] | 1
|
2020-12-12T19:09:01.000Z
|
2020-12-12T19:09:01.000Z
|
array/python3/5_move_all_negative_elements.py
|
jitendragangwar123/cp
|
8d9da1abd841784da8304e7ebb64a6b94cb804bb
|
[
"MIT"
] | 1
|
2020-12-12T18:36:24.000Z
|
2020-12-12T18:36:24.000Z
|
def sort(arr):
# Start index 0.
start = 0
# End index
end = len(arr)-1
while start <= end:
# Swap all positive value with last index end & decrease end by 1.
if arr[start] >= 0:
arr[start], arr[end] = arr[end], arr[start]
end -= 1
else:
# If arr[start] is not positive then increase start by 1.
start += 1
if __name__ == "__main__":
arr = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
sort(arr)
print(arr)
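As a quick sanity check of the partition above (an illustrative addition): negatives end up before non-negatives, though relative order inside each group is not preserved:

# Verify the partition: all negatives first, then all non-negatives.
arr2 = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
sort(arr2)
assert all(x < 0 for x in arr2[:3]) and all(x >= 0 for x in arr2[3:])
print(arr2)  # e.g. [-1, -7, -3, 5, 6, 4, 8, 9, 2]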
| 23.714286
| 74
| 0.5
| 74
| 498
| 3.256757
| 0.472973
| 0.165975
| 0.082988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05414
| 0.369478
| 498
| 20
| 75
| 24.9
| 0.713376
| 0.291165
| 0
| 0
| 0
| 0
| 0.022989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.076923
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aac551e77cffa8d22df81867eace49a7797fd1d
| 1,199
|
py
|
Python
|
misc.py
|
hldai/wikiprocesspy
|
788ccb6f0e0e54a7322863d5a13332635afc240d
|
[
"MIT"
] | null | null | null |
misc.py
|
hldai/wikiprocesspy
|
788ccb6f0e0e54a7322863d5a13332635afc240d
|
[
"MIT"
] | null | null | null |
misc.py
|
hldai/wikiprocesspy
|
788ccb6f0e0e54a7322863d5a13332635afc240d
|
[
"MIT"
] | null | null | null |
import json
def __text_from_anchor_sents_file(anchor_sents_file, output_file):
f = open(anchor_sents_file, encoding='utf-8')
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for i, line in enumerate(f):
sent = json.loads(line)
fout.write('{}\n'.format(sent['tokens']))
# if i > 5:
# break
f.close()
fout.close()
def merge_files(filenames, output_file):
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for filename in filenames:
print(filename)
f = open(filename, encoding='utf-8')
for line in f:
fout.write(line)
f.close()
fout.close()
wiki19_anchor_sents_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents.txt'
anchor_sent_texts_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts.txt'
# __text_from_anchor_sents_file(wiki19_anchor_sents_file, anchor_sent_texts_file)
part_pos_tag_files = [f'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos-{i}.txt' for i in range(4)]
pos_tag_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos.txt'
# merge_files(part_pos_tag_files, pos_tag_file)
| 35.264706
| 118
| 0.686405
| 186
| 1,199
| 4.188172
| 0.27957
| 0.141207
| 0.115533
| 0.061617
| 0.441592
| 0.382542
| 0.382542
| 0.382542
| 0.382542
| 0.382542
| 0
| 0.042
| 0.165972
| 1,199
| 33
| 119
| 36.333333
| 0.737
| 0.120934
| 0
| 0.272727
| 0
| 0.136364
| 0.287893
| 0.253575
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.136364
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aad4ce5dfa92a930b5b7dfb6e85c80cb8498743
| 2,833
|
py
|
Python
|
neural_toolbox/inception.py
|
ibrahimSouleiman/GuessWhat
|
60d140de1aae5ccda27e7d3eef2b9fb9548f0854
|
[
"Apache-2.0"
] | null | null | null |
neural_toolbox/inception.py
|
ibrahimSouleiman/GuessWhat
|
60d140de1aae5ccda27e7d3eef2b9fb9548f0854
|
[
"Apache-2.0"
] | null | null | null |
neural_toolbox/inception.py
|
ibrahimSouleiman/GuessWhat
|
60d140de1aae5ccda27e7d3eef2b9fb9548f0854
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.resnet_v1 as resnet_v1
import tensorflow.contrib.slim.python.slim.nets.inception_v1 as inception_v1
import tensorflow.contrib.slim.python.slim.nets.resnet_utils as slim_utils
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import arg_scope
import os
def get_resnet_arg_scope(bn_fn):
"""
    Trick to apply CBN from a pretrained tf network. It overrides the batchnorm constructor with cbn
:param bn_fn: cbn factory
:return: tensorflow scope
"""
with arg_scope(
[layers_lib.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=bn_fn,
normalizer_params=None) as arg_sc:
return arg_sc
def create_inception(image_input, is_training, scope="", inception_out="Mixed_5c", resnet_version=50, cbn=None):
"""
    Create an Inception network by overriding the classic batchnorm with conditional batchnorm
:param image_input: placeholder with image
:param is_training: are you using the resnet at training_time or test_time
:param scope: tensorflow scope
:param resnet_version: 50/101/152
:param cbn: the cbn factory
:return: the resnet output
"""
# assert False, "\n" \
# "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
# "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
# arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)
# print("--- 1")
arg_sc = inception_v1.inception_v1_arg_scope()
# Pick the correct version of the resnet
# if resnet_version == 50:
# current_resnet = resnet_v1.resnet_v1_50
# elif resnet_version == 101:
# current_resnet = resnet_v1.resnet_v1_101
# elif resnet_version == 152:
# current_resnet = resnet_v1.resnet_v1_152
# else:
# raise ValueError("Unsupported resnet version")
# inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out)
# print("--- 2")
inception_scope = inception_out
# print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope))
# print("--- 3")
with slim.arg_scope(arg_sc):
net, end_points = inception_v1.inception_v1(image_input, 1001) # 1000 is the number of softmax class
print("Net = ",net)
# print("--- 4")
if len(scope) > 0 and not scope.endswith("/"):
scope += "/"
# print("--- 5")
# print(end_points)
print(" Batch ",inception_scope)
out = end_points[scope + inception_scope]
print("-- out Use: {},output = {}".format(inception_scope,out))
return out,end_points
| 36.320513
| 143
| 0.676668
| 378
| 2,833
| 4.857143
| 0.330688
| 0.034858
| 0.050109
| 0.058824
| 0.123094
| 0.123094
| 0.075708
| 0.075708
| 0
| 0
| 0
| 0.027953
| 0.217084
| 2,833
| 77
| 144
| 36.792208
| 0.79982
| 0.487116
| 0
| 0
| 0
| 0
| 0.035793
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.296296
| 0
| 0.444444
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aad74ee52655f68220f799efaffcbccdd0748ad
| 6,133
|
py
|
Python
|
timm/utils/checkpoint_saver.py
|
Robert-JunWang/pytorch-image-models
|
7c67d6aca992f039eece0af5f7c29a43d48c00e4
|
[
"Apache-2.0"
] | 17,769
|
2019-05-02T08:08:25.000Z
|
2022-03-31T22:14:44.000Z
|
timm/utils/checkpoint_saver.py
|
jonychoi/pytorch-image-models
|
e4360e6125bb0bb4279785810c8eb33b40af3ebd
|
[
"Apache-2.0"
] | 556
|
2019-05-26T16:31:37.000Z
|
2022-03-30T04:21:07.000Z
|
timm/utils/checkpoint_saver.py
|
jonychoi/pytorch-image-models
|
e4360e6125bb0bb4279785810c8eb33b40af3ebd
|
[
"Apache-2.0"
] | 3,029
|
2019-05-14T01:18:28.000Z
|
2022-03-31T20:09:50.000Z
|
""" Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 Ross Wightman
"""
import glob
import operator
import os
import logging
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.last_recovery_file = ''
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
if os.path.exists(last_save_path):
os.unlink(last_save_path) # required for Windows support.
os.rename(tmp_save_path, last_save_path)
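        # 'last.pth.tar' now holds the newest weights; the epoch-named and
        # 'model_best' checkpoints created below are hard links to it
        # (os.link), so each set of weights is written to disk only once.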
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (len(self.checkpoint_files) < self.max_history
or metric is None or self.cmp(metric, worst_file[1])):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
os.link(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files, key=lambda x: x[1],
reverse=not self.decreasing) # sort in descending order if a lower metric is not better
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
if os.path.exists(best_save_path):
os.unlink(best_save_path)
os.link(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._save(save_path, epoch)
if os.path.exists(self.last_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
os.remove(self.last_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
self.last_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
| 40.615894
| 104
| 0.626121
| 767
| 6,133
| 4.762712
| 0.191656
| 0.04599
| 0.072817
| 0.03285
| 0.261429
| 0.164796
| 0.121544
| 0.095812
| 0
| 0
| 0
| 0.004531
| 0.280287
| 6,133
| 150
| 105
| 40.886667
| 0.823063
| 0.073863
| 0
| 0.04878
| 0
| 0
| 0.044492
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.04878
| false
| 0
| 0.04878
| 0
| 0.130081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aad9dd74183fdbafeb45c7c06a4bb4ab92534aa
| 292
|
py
|
Python
|
AGC004/AGC004a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
AGC004/AGC004a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
AGC004/AGC004a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
# AGC004a
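# Presumably AtCoder AGC004 problem A ("Divide a Cuboid"): split an a x b x c
# block into two pieces and minimise the volume difference. If any edge is
# even the block can be halved exactly (difference 0); otherwise the best cut
# removes a one-unit slab, so the answer is the smallest face area
# min(a*b, b*c, c*a).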
def main():
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
a, b, c = map(int, input().split())
if a % 2 == 0 or b % 2 == 0 or c % 2 == 0:
print(0)
exit(0)
print(min(a*b, b*c, c*a))
if __name__ == '__main__':
main()
| 18.25
| 46
| 0.506849
| 48
| 292
| 2.916667
| 0.520833
| 0.042857
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069652
| 0.311644
| 292
| 15
| 47
| 19.466667
| 0.626866
| 0.023973
| 0
| 0
| 0
| 0
| 0.028269
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab135f81cd0354b89240b44a37bacfa732bfab3
| 13,664
|
py
|
Python
|
qcore/asserts.py
|
corey-sobel/qcore
|
719a44617789e3cc384ce860031d9479ee0877e4
|
[
"Apache-2.0"
] | 1
|
2022-01-31T23:15:48.000Z
|
2022-01-31T23:15:48.000Z
|
qcore/asserts.py
|
corey-sobel/qcore
|
719a44617789e3cc384ce860031d9479ee0877e4
|
[
"Apache-2.0"
] | null | null | null |
qcore/asserts.py
|
corey-sobel/qcore
|
719a44617789e3cc384ce860031d9479ee0877e4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module with assertion helpers.
The advantages of using a method like
assert_eq(expected, actual)
instead of
assert expected == actual
include:
1 - On failures, assert_eq prints an informative message of the actual
values compared (e.g. AssertionError: 1 != 2) for free, which makes it
faster and easier to iterate on tests.
2 - In the context of refactors, basic asserts incorrectly shift the burden of
adding printouts and writing good test code to people refactoring code
rather than the person who initially wrote the code.
"""
__all__ = [
"assert_is",
"assert_is_not",
"assert_is_instance",
"assert_eq",
"assert_dict_eq",
"assert_ne",
"assert_gt",
"assert_ge",
"assert_lt",
"assert_le",
"assert_in",
"assert_not_in",
"assert_in_with_tolerance",
"assert_unordered_list_eq",
"assert_raises",
"AssertRaises",
# Strings
"assert_is_substring",
"assert_is_not_substring",
"assert_startswith",
"assert_endswith",
]
# The unittest.py testing framework checks for this variable in a module to
# filter out stack frames from that module from the test output, in order to
# make the output more concise.
# __unittest = 1
import traceback
from .inspection import get_full_name
_number_types = (int, float, complex)
def _assert_fail_message(message, expected, actual, comparison_str, extra):
if message:
return message
if extra:
return "%a %s %a (%s)" % (expected, comparison_str, actual, extra)
return "%a %s %a" % (expected, comparison_str, actual)
def assert_is(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is not actual."""
assert expected is actual, _assert_fail_message(
message, expected, actual, "is not", extra
)
def assert_is_not(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is actual."""
assert expected is not actual, _assert_fail_message(
message, expected, actual, "is", extra
)
def assert_is_instance(value, types, message=None, extra=None):
"""Raises an AssertionError if value is not an instance of type(s)."""
assert isinstance(value, types), _assert_fail_message(
message, value, types, "is not an instance of", extra
)
def assert_eq(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected != actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is larger than the tolerance.
"""
if tolerance is None:
assert expected == actual, _assert_fail_message(
message, expected, actual, "!=", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff <= tolerance, _assert_fail_message(
message, expected, actual, "is more than %a away from" % tolerance, extra
)
def _dict_path_string(path):
if len(path) == 0:
return "(root)"
return "->".join(map(ascii, path))
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
"""Asserts that two dictionaries are equal, producing a custom message if they are not."""
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %a" % (
_dict_path_string(dict_path),
expected_keys - actual_keys,
)
assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %a" % (
_dict_path_string(dict_path),
actual_keys - expected_keys,
)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(
actual[k],
type(expected[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
assert_is_instance(
expected[k],
type(actual[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
if isinstance(actual[k], dict):
assert_dict_eq(
expected[k],
actual[k],
number_tolerance=number_tolerance,
dict_path=key_path,
)
elif isinstance(actual[k], _number_types):
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
tolerance=number_tolerance,
)
else:
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
)
def assert_ne(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected == actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is smaller than the tolerance.
"""
if tolerance is None:
assert expected != actual, _assert_fail_message(
message, expected, actual, "==", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff > tolerance, _assert_fail_message(
message, expected, actual, "is less than %a away from" % tolerance, extra
)
def assert_gt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand <= right_hand."""
assert left > right, _assert_fail_message(message, left, right, "<=", extra)
def assert_ge(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand < right_hand."""
assert left >= right, _assert_fail_message(message, left, right, "<", extra)
def assert_lt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand >= right_hand."""
assert left < right, _assert_fail_message(message, left, right, ">=", extra)
def assert_le(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand > right_hand."""
assert left <= right, _assert_fail_message(message, left, right, ">", extra)
def assert_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq."""
assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, str) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra)
def assert_in_with_tolerance(obj, seq, tolerance, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq using assert_eq cmp."""
for i in seq:
try:
assert_eq(obj, i, tolerance=tolerance, message=message, extra=extra)
return
except AssertionError:
pass
assert False, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_unordered_list_eq(expected, actual, message=None):
"""Raises an AssertionError if the objects contained
in expected are not equal to the objects contained
in actual without regard to their order.
    This takes quadratic time in the number of elements in actual; don't use it for very long lists.
"""
missing_in_actual = []
missing_in_expected = list(actual)
for x in expected:
try:
missing_in_expected.remove(x)
except ValueError:
missing_in_actual.append(x)
if missing_in_actual or missing_in_expected:
if not message:
message = (
"%a not equal to %a; missing items: %a in expected, %a in actual."
% (expected, actual, missing_in_expected, missing_in_actual)
)
assert False, message
def assert_raises(fn, *expected_exception_types):
"""Raises an AssertionError if calling fn does not raise one of the expected_exception-types."""
with AssertRaises(*expected_exception_types):
fn()
class AssertRaises(object):
"""With-context that asserts that the code within the context raises the specified exception."""
def __init__(self, *expected_exception_types, **kwargs):
# when you don't specify the exception expected, it's easy to write buggy tests that appear
# to pass but actually throw an exception different from the expected one
assert (
len(expected_exception_types) >= 1
), "You must specify the exception type when using AssertRaises"
self.expected_exception_types = set(expected_exception_types)
self.expected_exception_found = None
self.extra = kwargs.pop("extra", None)
assert_eq({}, kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type in self.expected_exception_types:
# Return True to suppress the Exception if the type matches. For details,
# see: http://docs.python.org/release/2.5.2/lib/typecontextmanager.html
self.expected_exception_found = exc_val
return True
for t in self.expected_exception_types:
if isinstance(exc_val, t):
self.expected_exception_found = exc_val
return True
expected = ", ".join(map(get_full_name, self.expected_exception_types))
if exc_type is None:
message = "No exception raised, but expected: %s" % expected
if self.extra is not None:
message += " (%s)" % self.extra
else:
template = (
"{TYPE}: {VAL} is raised, but expected:"
" {EXPECTED}{EXTRA_STR}\n\n{STACK}"
)
message = template.format(
TYPE=get_full_name(exc_type),
VAL=exc_val,
EXPECTED=expected,
STACK="".join(traceback.format_tb(exc_tb)),
EXTRA_STR=(" (%s)" % self.extra) if self.extra is not None else "",
)
raise AssertionError(message)
# ===================================================
# Strings
# ===================================================
def assert_is_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is not a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) != -1)
), _assert_fail_message(message, substring, subject, "is not in", extra)
def assert_is_not_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) == -1)
), _assert_fail_message(message, substring, subject, "is in", extra)
def assert_startswith(prefix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not start with prefix."""
assert (
(type(subject) is str)
and (type(prefix) is str)
and (subject.startswith(prefix))
), _assert_fail_message(message, subject, prefix, "does not start with", extra)
def assert_endswith(suffix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not end with suffix."""
assert (
(type(subject) is str) and (type(suffix) is str) and (subject.endswith(suffix))
), _assert_fail_message(message, subject, suffix, "does not end with", extra)
| 34.592405
| 100
| 0.640222
| 1,733
| 13,664
| 4.875361
| 0.171379
| 0.039768
| 0.040241
| 0.056811
| 0.464079
| 0.420168
| 0.405018
| 0.378861
| 0.352823
| 0.347852
| 0
| 0.00286
| 0.257904
| 13,664
| 394
| 101
| 34.680203
| 0.830375
| 0.259368
| 0
| 0.233333
| 0
| 0.004167
| 0.116202
| 0.010372
| 0
| 0
| 0
| 0
| 0.3625
| 1
| 0.1
| false
| 0.004167
| 0.008333
| 0.004167
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab21446cecd0d46b1a47275470353f326cec4d7
| 6,318
|
py
|
Python
|
src/python/pants/core/goals/check_test.py
|
yoav-orca/pants
|
995448e9add343975844c7a43d5d64618fc4e4d9
|
[
"Apache-2.0"
] | 1,806
|
2015-01-05T07:31:00.000Z
|
2022-03-31T11:35:41.000Z
|
src/python/pants/core/goals/check_test.py
|
yoav-orca/pants
|
995448e9add343975844c7a43d5d64618fc4e4d9
|
[
"Apache-2.0"
] | 9,565
|
2015-01-02T19:01:59.000Z
|
2022-03-31T23:25:16.000Z
|
src/python/pants/core/goals/check_test.py
|
riisi/pants
|
b33327389fab67c47b919710ea32f20ca284b1a6
|
[
"Apache-2.0"
] | 443
|
2015-01-06T20:17:57.000Z
|
2022-03-31T05:28:17.000Z
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABCMeta, abstractmethod
from pathlib import Path
from textwrap import dedent
from typing import ClassVar, Iterable, List, Optional, Tuple, Type
from pants.core.goals.check import Check, CheckRequest, CheckResult, CheckResults, check
from pants.core.util_rules.distdir import DistDir
from pants.engine.addresses import Address
from pants.engine.fs import Workspace
from pants.engine.target import FieldSet, MultipleSourcesField, Target, Targets
from pants.engine.unions import UnionMembership
from pants.testutil.option_util import create_options_bootstrapper
from pants.testutil.rule_runner import MockGet, RuleRunner, mock_console, run_rule_with_mocks
from pants.util.logging import LogLevel
class MockTarget(Target):
alias = "mock_target"
core_fields = (MultipleSourcesField,)
class MockCheckFieldSet(FieldSet):
required_fields = (MultipleSourcesField,)
class MockCheckRequest(CheckRequest, metaclass=ABCMeta):
field_set_type = MockCheckFieldSet
checker_name: ClassVar[str]
@staticmethod
@abstractmethod
def exit_code(_: Iterable[Address]) -> int:
pass
@property
def check_results(self) -> CheckResults:
addresses = [config.address for config in self.field_sets]
return CheckResults(
[
CheckResult(
self.exit_code(addresses),
"",
"",
)
],
checker_name=self.checker_name,
)
class SuccessfulRequest(MockCheckRequest):
checker_name = "SuccessfulChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 0
class FailingRequest(MockCheckRequest):
checker_name = "FailingChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 1
class ConditionallySucceedsRequest(MockCheckRequest):
checker_name = "ConditionallySucceedsChecker"
@staticmethod
def exit_code(addresses: Iterable[Address]) -> int:
if any(address.target_name == "bad" for address in addresses):
return 127
return 0
class SkippedRequest(MockCheckRequest):
@staticmethod
def exit_code(_) -> int:
return 0
@property
def check_results(self) -> CheckResults:
return CheckResults([], checker_name="SkippedChecker")
class InvalidField(MultipleSourcesField):
pass
class InvalidFieldSet(MockCheckFieldSet):
required_fields = (InvalidField,)
class InvalidRequest(MockCheckRequest):
field_set_type = InvalidFieldSet
checker_name = "InvalidChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return -1
def make_target(address: Optional[Address] = None) -> Target:
if address is None:
address = Address("", target_name="tests")
return MockTarget({}, address)
def run_typecheck_rule(
*, request_types: List[Type[CheckRequest]], targets: List[Target]
) -> Tuple[int, str]:
union_membership = UnionMembership({CheckRequest: request_types})
with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
rule_runner = RuleRunner()
result: Check = run_rule_with_mocks(
check,
rule_args=[
console,
Workspace(rule_runner.scheduler, _enforce_effects=False),
Targets(targets),
DistDir(relpath=Path("dist")),
union_membership,
],
mock_gets=[
MockGet(
output_type=CheckResults,
input_type=CheckRequest,
mock=lambda field_set_collection: field_set_collection.check_results,
),
],
union_membership=union_membership,
)
assert not stdio_reader.get_stdout()
return result.exit_code, stdio_reader.get_stderr()
def test_invalid_target_noops() -> None:
exit_code, stderr = run_typecheck_rule(request_types=[InvalidRequest], targets=[make_target()])
assert exit_code == 0
assert stderr == ""
def test_summary() -> None:
good_address = Address("", target_name="good")
bad_address = Address("", target_name="bad")
exit_code, stderr = run_typecheck_rule(
request_types=[
ConditionallySucceedsRequest,
FailingRequest,
SkippedRequest,
SuccessfulRequest,
],
targets=[make_target(good_address), make_target(bad_address)],
)
assert exit_code == FailingRequest.exit_code([bad_address])
assert stderr == dedent(
"""\
𐄂 ConditionallySucceedsChecker failed.
𐄂 FailingChecker failed.
- SkippedChecker skipped.
✓ SuccessfulChecker succeeded.
"""
)
def test_streaming_output_skip() -> None:
results = CheckResults([], checker_name="typechecker")
assert results.level() == LogLevel.DEBUG
assert results.message() == "typechecker skipped."
def test_streaming_output_success() -> None:
results = CheckResults([CheckResult(0, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.INFO
assert results.message() == dedent(
"""\
typechecker succeeded.
stdout
stderr
"""
)
def test_streaming_output_failure() -> None:
results = CheckResults([CheckResult(18, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 18).
stdout
stderr
"""
)
def test_streaming_output_partitions() -> None:
results = CheckResults(
[
CheckResult(21, "", "", partition_description="ghc8.1"),
CheckResult(0, "stdout", "stderr", partition_description="ghc9.2"),
],
checker_name="typechecker",
)
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 21).
Partition #1 - ghc8.1:
Partition #2 - ghc9.2:
stdout
stderr
"""
)
| 28.459459
| 99
| 0.650681
| 615
| 6,318
| 6.495935
| 0.273171
| 0.030038
| 0.016521
| 0.028786
| 0.198999
| 0.182728
| 0.138924
| 0.126909
| 0.094118
| 0.052065
| 0
| 0.007407
| 0.252137
| 6,318
| 221
| 100
| 28.588235
| 0.83746
| 0.019943
| 0
| 0.212329
| 0
| 0
| 0.04019
| 0.004914
| 0
| 0
| 0
| 0
| 0.089041
| 1
| 0.109589
| false
| 0.013699
| 0.089041
| 0.034247
| 0.410959
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab219191a7ea6ce5d831e0b7655a8775e4ac26e
| 9,851
|
py
|
Python
|
data-processing/entities/definitions/model/utils.py
|
alexkreidler/scholarphi
|
86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2
|
[
"Apache-2.0"
] | null | null | null |
data-processing/entities/definitions/model/utils.py
|
alexkreidler/scholarphi
|
86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2
|
[
"Apache-2.0"
] | null | null | null |
data-processing/entities/definitions/model/utils.py
|
alexkreidler/scholarphi
|
86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2
|
[
"Apache-2.0"
] | 1
|
2020-10-23T12:36:11.000Z
|
2020-10-23T12:36:11.000Z
|
import os
import random
from typing import Any, Dict, List, Union
import numpy as np
import torch
from colorama import Fore, Style
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import precision_score, recall_score
def highlight(input_: Any) -> str:
input_ = str(input_)
return str(Fore.YELLOW + str(input_) + Style.RESET_ALL)
def get_intent_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.intent_label_file), "r", encoding="utf-8"
)
]
def get_slot_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.slot_label_file), "r", encoding="utf-8"
)
]
def get_pos_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.pos_label_file), "r", encoding="utf-8"
)
]
def set_torch_seed(seed: Any, no_cuda: bool) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed) # type: ignore
if not no_cuda and torch.cuda.is_available():
torch.cuda.manual_seed_all(seed) # type: ignore
def compute_metrics(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
assert (
len(intent_preds) == len(intent_labels) == len(slot_preds) == len(slot_labels)
)
results: Dict[Any, Any] = {}
intent_result = get_intent_acc(intent_preds, intent_labels)
slot_result = get_slot_metrics(slot_preds, slot_labels)
sementic_result = get_sentence_frame_acc(
intent_preds, intent_labels, slot_preds, slot_labels
)
# New metrics added following Dan's request.
slot_simple_result = get_slot_simple_metrics(slot_preds, slot_labels)
partial_match_result = get_partial_match_metrics(slot_preds, slot_labels)
results.update(intent_result)
results.update(slot_result)
results.update(sementic_result)
results.update(slot_simple_result)
results.update(partial_match_result)
return results
def simplify_tokens(preds: List[str]) -> List[str]:
simple_preds = []
for p in preds:
if p.endswith("TERM"):
simple_preds.append("TERM")
elif p.endswith("DEF"):
simple_preds.append("DEF")
else:
simple_preds.append(p)
return simple_preds
def get_partial_match_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
    Suppose there are N such pairs in the gold data and the system predicts M such pairs. Say a 'partial match' happens when the system predicts a pair <term, defn> and there is some overlap (at least one token) between the predicted and gold term spans AND there is some overlap between the predicted and gold definition spans. Let P be the number of partial matches. Then:
    Partial match precision = P/M
    Partial match recall = P/N
"""
assert len(preds) == len(labels)
both_in_preds, both_in_labels = [], []
partial_matches, exact_matches = [], []
for pred_sent, label_sent in zip(preds, labels):
simple_pred_sent = simplify_tokens(pred_sent)
simple_label_sent = simplify_tokens(label_sent)
# check whether term/def exist together
both_in_pred = "TERM" in simple_pred_sent and "DEF" in simple_pred_sent
both_in_label = "TERM" in simple_label_sent and "DEF" in simple_label_sent
both_in_preds.append(both_in_pred)
both_in_labels.append(both_in_label)
partial_match = False
exact_match = False
match: List[Union[str, bool]] = []
if both_in_pred and both_in_label:
for p, l in zip(simple_pred_sent, simple_label_sent):
if p == l:
match.append(p)
else:
match.append(False)
if "TERM" in match and "DEF" in match:
partial_match = True
if False not in match:
exact_match = True
partial_matches.append(partial_match)
exact_matches.append(exact_match)
    count_both_in_preds = sum(both_in_preds)  # M (sentences where the system predicts both a TERM and a DEF)
    count_both_in_labels = sum(both_in_labels)  # N (gold sentences containing both a TERM and a DEF)
count_partial_matches = sum(partial_matches) # P
count_exact_matches = sum(exact_matches) # E
partial_precision = count_partial_matches / count_both_in_preds
partial_recall = count_partial_matches / count_both_in_labels
partial_fscore = (
2 * partial_precision * partial_recall / (partial_precision + partial_recall)
)
exact_precision = count_exact_matches / count_both_in_preds
exact_recall = count_exact_matches / count_both_in_labels
exact_fscore = 2 * exact_precision * exact_recall / (exact_precision + exact_recall)
return {
"partial_match_precision": partial_precision,
"partial_match_recall": partial_recall,
"partial_match_f1": partial_fscore,
"exact_match_precision": exact_precision,
"excat_match_recall": exact_recall,
"excat_match_f1": exact_fscore,
}
def get_slot_simple_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
Conceptually, define the following new types of ‘virtual tags’
TERM = B-term OR I-Term (ie the union of those two tags)
DEF = B-Def OR I-Def
    Now, what are the P, R & F1 numbers for TERM and DEF? (I think these matter because users may just care about accuracy of term and defn matching, and the macro-averaged scores conflate other things like recall on these metrics and precision on O. Likewise the current macro average treats missing the first word in a definition differently from skipping the last word.)
"""
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
# simplify by replacing {B,I}-TERM to TERM and {B,I}-DEF to DEF
simple_preds = simplify_tokens(preds_flattened)
simple_labels = simplify_tokens(labels_flattened)
assert len(simple_preds) == len(simple_labels)
label_names = ["O", "TERM", "DEF"]
p, r, f, s = score(simple_labels, simple_preds, average=None, labels=label_names)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# pprint(per_class)
return {
"slot_merged_TERM_precision": per_class["p"][1],
"slot_merged_TERM_recall": per_class["r"][1],
"slot_merged_TERM_f1": per_class["f"][1],
"slot_merged_DEFINITION_precision": per_class["p"][2],
"slot_merged_DEFINITION_recall": per_class["r"][2],
"slot_merged_DEFINITION_f1": per_class["f"][2],
}
def get_slot_metrics(preds: List[List[str]], labels: List[List[str]]) -> Dict[Any, Any]:
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
macro_f1 = f1_score(labels_flattened, preds_flattened, average="macro")
micro_f1 = f1_score(labels_flattened, preds_flattened, average="micro")
macro_p = precision_score(labels_flattened, preds_flattened, average="macro")
micro_p = precision_score(labels_flattened, preds_flattened, average="micro")
macro_r = recall_score(labels_flattened, preds_flattened, average="macro")
micro_r = recall_score(labels_flattened, preds_flattened, average="micro")
label_names = ["O", "B-TERM", "I-TERM", "B-DEF", "I-DEF"]
p, r, f, s = score(
labels_flattened, preds_flattened, average=None, labels=label_names
)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# print(per_class)
return {
"slot_precision_macro": macro_p,
"slot_recall_macro": macro_r,
"slot_f1_macro": macro_f1,
"slot_precision_micro": micro_p,
"slot_recall_micro": micro_r,
"slot_f1_micro": micro_f1,
"slot_precision_per_label": per_class["p"],
"slot_recal_per_label": per_class["r"],
"slot_f1_per_label": per_class["f"],
"slot_num_per_label": per_class["s"],
}
def get_intent_acc(preds: List[str], labels: List[str]) -> Dict[Any, Any]:
acc = (preds == labels).mean()
return {"intent_acc": acc}
def read_prediction_text(args: Any) -> List[str]:
return [
text.strip()
for text in open(
os.path.join(args.pred_dir, args.pred_input_file), "r", encoding="utf-8"
)
]
def get_sentence_frame_acc(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
"""For the cases that intent and all the slots are correct (in one sentence)"""
# Get the intent comparison result
intent_result = intent_preds == intent_labels
    # Get the slot comparison result
slot_result = []
for preds, labels in zip(slot_preds, slot_labels):
assert len(preds) == len(labels)
one_sent_result = True
for p, l in zip(preds, labels):
if p != l:
one_sent_result = False
break
slot_result.append(one_sent_result)
slot_result = np.array(slot_result)
sementic_acc = np.multiply(intent_result, slot_result).mean()
return {"sementic_frame_acc": sementic_acc}
| 35.952555
| 376
| 0.66308
| 1,418
| 9,851
| 4.35402
| 0.158674
| 0.024943
| 0.017817
| 0.028345
| 0.360382
| 0.298996
| 0.241173
| 0.232588
| 0.168772
| 0.167315
| 0
| 0.004346
| 0.229114
| 9,851
| 273
| 377
| 36.084249
| 0.808665
| 0.132575
| 0
| 0.230392
| 0
| 0
| 0.072094
| 0.023953
| 0
| 0
| 0
| 0
| 0.029412
| 1
| 0.063725
| false
| 0
| 0.044118
| 0.019608
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab2ef53d9a0815c477ae2435981a3a0029d019b
| 11,463
|
py
|
Python
|
fire/trace.py
|
nvhoang55/python-fire
|
b78287f6d68208732ca4d91e57f4678e6c4747c7
|
[
"Apache-2.0"
] | null | null | null |
fire/trace.py
|
nvhoang55/python-fire
|
b78287f6d68208732ca4d91e57f4678e6c4747c7
|
[
"Apache-2.0"
] | null | null | null |
fire/trace.py
|
nvhoang55/python-fire
|
b78287f6d68208732ca4d91e57f4678e6c4747c7
|
[
"Apache-2.0"
] | 1
|
2022-01-17T08:35:09.000Z
|
2022-01-17T08:35:09.000Z
|
# Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module has classes for tracing the execution of a Fire execution.
A FireTrace consists of a sequence of FireTraceElement objects. Each element
represents an action taken by Fire during a single Fire execution. An action may
be instantiating a class, calling a routine, or accessing a property.
Each action consumes args and results in a new component. The final component
is serialized to stdout by Fire as well as returned by the Fire method. If
a Fire usage error occurs, such as insufficient arguments being provided to call
a function, then that error will be captured in the trace and the final
component will be None.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pipes
from fire import inspectutils
INITIAL_COMPONENT = 'Initial component'
INSTANTIATED_CLASS = 'Instantiated class'
CALLED_ROUTINE = 'Called routine'
CALLED_CALLABLE = 'Called callable'
ACCESSED_PROPERTY = 'Accessed property'
COMPLETION_SCRIPT = 'Generated completion script'
INTERACTIVE_MODE = 'Entered interactive mode'
class FireTrace(object):
"""A FireTrace represents the steps taken during a single Fire execution.
A FireTrace consists of a sequence of FireTraceElement objects. Each element
represents an action taken by Fire during a single Fire execution. An action
may be instantiating a class, calling a routine, or accessing a property.
"""
def __init__(self, initial_component, name=None, separator='-', verbose=False,
show_help=False, show_trace=False):
initial_trace_element = FireTraceElement(
component=initial_component,
action=INITIAL_COMPONENT,
)
self.name = name
self.separator = separator
self.elements = [initial_trace_element]
self.verbose = verbose
self.show_help = show_help
self.show_trace = show_trace
def GetResult(self):
"""Returns the component from the last element of the trace."""
# pytype: disable=attribute-error
return self.GetLastHealthyElement().component
# pytype: enable=attribute-error
def GetLastHealthyElement(self):
"""Returns the last element of the trace that is not an error.
This element will contain the final component indicated by the trace.
Returns:
The last element of the trace that is not an error.
"""
for element in reversed(self.elements):
if not element.HasError():
return element
return None
def HasError(self):
"""Returns whether the Fire execution encountered a Fire usage error."""
return self.elements[-1].HasError()
def AddAccessedProperty(self, component, target, args, filename, lineno):
element = FireTraceElement(
component=component,
action=ACCESSED_PROPERTY,
target=target,
args=args,
filename=filename,
lineno=lineno,
)
self.elements.append(element)
def AddCalledComponent(self, component, target, args, filename, lineno,
capacity, action=CALLED_CALLABLE):
"""Adds an element to the trace indicating that a component was called.
Also applies to instantiating a class.
Args:
component: The result of calling the callable.
target: The name of the callable.
args: The args consumed in order to call this callable.
filename: The file in which the callable is defined, or None if N/A.
lineno: The line number on which the callable is defined, or None if N/A.
capacity: (bool) Whether the callable could have accepted additional args.
action: The value to include as the action in the FireTraceElement.
"""
element = FireTraceElement(
component=component,
action=action,
target=target,
args=args,
filename=filename,
lineno=lineno,
capacity=capacity,
)
self.elements.append(element)
def AddCompletionScript(self, script):
element = FireTraceElement(
component=script,
action=COMPLETION_SCRIPT,
)
self.elements.append(element)
def AddInteractiveMode(self):
element = FireTraceElement(action=INTERACTIVE_MODE)
self.elements.append(element)
def AddError(self, error, args):
element = FireTraceElement(error=error, args=args)
self.elements.append(element)
def AddSeparator(self):
"""Marks that the most recent element of the trace used a separator.
A separator is an argument you can pass to a Fire CLI to separate args left
of the separator from args right of the separator.
Here's an example to demonstrate the separator. Let's say you have a
function that takes a variable number of args, and you want to call that
function, and then upper case the result. Here's how to do it:
# in Python
def display(arg1, arg2='!'):
return arg1 + arg2
# from Bash (the default separator is the hyphen -)
display hello # hello!
display hello upper # helloupper
display hello - upper # HELLO!
Note how the separator caused the display function to be called with the
default value for arg2.
"""
self.elements[-1].AddSeparator()
def _Quote(self, arg):
if arg.startswith('--') and '=' in arg:
prefix, value = arg.split('=', 1)
return pipes.quote(prefix) + '=' + pipes.quote(value)
return pipes.quote(arg)
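    # e.g. _Quote('--name=foo bar') -> "--name='foo bar'": only the value part
    # needs shell quoting, so the flag syntax stays readable in GetCommand output.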
def GetCommand(self, include_separators=True):
"""Returns the command representing the trace up to this point.
Args:
include_separators: Whether or not to include separators in the command.
Returns:
A string representing a Fire CLI command that would produce this trace.
"""
args = []
if self.name:
args.append(self.name)
for element in self.elements:
if element.HasError():
continue
if element.args:
args.extend(element.args)
if element.HasSeparator() and include_separators:
args.append(self.separator)
if self.NeedsSeparator() and include_separators:
args.append(self.separator)
return ' '.join(self._Quote(arg) for arg in args)
def NeedsSeparator(self):
"""Returns whether a separator should be added to the command.
If the command is a function call, then adding an additional argument to the
command sometimes would add an extra arg to the function call, and sometimes
would add an arg acting on the result of the function call.
This function tells us whether we should add a separator to the command
before adding additional arguments in order to make sure the arg is applied
to the result of the function call, and not the function call itself.
Returns:
Whether a separator should be added to the command if order to keep the
component referred to by the command the same when adding additional args.
"""
element = self.GetLastHealthyElement()
return element.HasCapacity() and not element.HasSeparator()
def __str__(self):
lines = []
for index, element in enumerate(self.elements):
line = '{index}. {trace_string}'.format(
index=index + 1,
trace_string=element,
)
lines.append(line)
return '\n'.join(lines)
def NeedsSeparatingHyphenHyphen(self, flag='help'):
"""Returns whether a the trace need '--' before '--help'.
'--' is needed when the component takes keyword arguments, when the value of
flag matches one of the argument of the component, or the component takes in
keyword-only arguments(e.g. argument with default value).
Args:
flag: the flag available for the trace
Returns:
True for needed '--', False otherwise.
"""
element = self.GetLastHealthyElement()
component = element.component
spec = inspectutils.GetFullArgSpec(component)
return (spec.varkw is not None
or flag in spec.args
or flag in spec.kwonlyargs)
class FireTraceElement(object):
"""A FireTraceElement represents a single step taken by a Fire execution.
Examples of a FireTraceElement are the instantiation of a class or the
accessing of an object member.
"""
def __init__(self,
component=None,
action=None,
target=None,
args=None,
filename=None,
lineno=None,
error=None,
capacity=None):
"""Instantiates a FireTraceElement.
Args:
component: The result of this element of the trace.
action: The type of action (eg instantiating a class) taking place.
target: (string) The name of the component being acted upon.
args: The args consumed by the represented action.
filename: The file in which the action is defined, or None if N/A.
lineno: The line number on which the action is defined, or None if N/A.
error: The error represented by the action, or None if N/A.
capacity: (bool) Whether the action could have accepted additional args.
"""
self.component = component
self._action = action
self._target = target
self.args = args
self._filename = filename
self._lineno = lineno
self._error = error
self._separator = False
self._capacity = capacity
def HasError(self):
return self._error is not None
def HasCapacity(self):
return self._capacity
def HasSeparator(self):
return self._separator
def AddSeparator(self):
self._separator = True
def ErrorAsStr(self):
return ' '.join(str(arg) for arg in self._error.args)
def __str__(self):
if self.HasError():
return self.ErrorAsStr()
else:
# Format is: {action} "{target}" ({filename}:{lineno})
string = self._action
if self._target is not None:
string += ' "{target}"'.format(target=self._target)
if self._filename is not None:
path = self._filename
if self._lineno is not None:
path += ':{lineno}'.format(lineno=self._lineno)
string += ' ({path})'.format(path=path)
return string
| 36.275316
| 84
| 0.640932
| 1,400
| 11,463
| 5.19
| 0.22
| 0.009634
| 0.008258
| 0.011698
| 0.21346
| 0.164327
| 0.139004
| 0.127168
| 0.113955
| 0.106524
| 0
| 0.002093
| 0.291547
| 11,463
| 315
| 85
| 36.390476
| 0.892624
| 0.449795
| 0
| 0.186667
| 0
| 0
| 0.034761
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146667
| false
| 0
| 0.033333
| 0.026667
| 0.3
| 0.006667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab36187809db9e1ba202abbdfa4e21a0d5b6dfb
| 33,549
|
py
|
Python
|
test/unit/__init__.py
|
thiagodasilva/swift
|
0553d9333ed0045c4d209065b315533a33e5d7d7
|
[
"Apache-2.0"
] | null | null | null |
test/unit/__init__.py
|
thiagodasilva/swift
|
0553d9333ed0045c4d209065b315533a33e5d7d7
|
[
"Apache-2.0"
] | null | null | null |
test/unit/__init__.py
|
thiagodasilva/swift
|
0553d9333ed0045c4d209065b315533a33e5d7d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Swift tests """
from __future__ import print_function
import os
import copy
import logging
import errno
from six.moves import range
import sys
from contextlib import contextmanager, closing
from collections import defaultdict, Iterable
import itertools
from numbers import Number
from tempfile import NamedTemporaryFile
import time
import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
from swift.common.utils import Timestamp, NOTICE
from test import get_config
from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
import logging.handlers
from six.moves.http_client import HTTPException
from swift.common import storage_policy
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
VALID_EC_TYPES)
import functools
import six.moves.cPickle as pickle
from gzip import GzipFile
import mock as mocklib
import inspect
EMPTY_ETAG = md5().hexdigest()
# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
# never patch HASH_PATH_SUFFIX AGAIN!
utils.HASH_PATH_SUFFIX = 'endcap'
EC_TYPE_PREFERENCE = [
'liberasurecode_rs_vand',
'jerasure_rs_vand',
]
for eclib_name in EC_TYPE_PREFERENCE:
if eclib_name in VALID_EC_TYPES:
break
else:
raise SystemExit('ERROR: unable to find suitable PyECLib type'
' (none of %r found in %r)' % (
EC_TYPE_PREFERENCE,
VALID_EC_TYPES,
))
DEFAULT_TEST_EC_TYPE = eclib_name
def patch_policies(thing_or_policies=None, legacy_only=False,
with_ec_default=False, fake_ring_args=None):
if isinstance(thing_or_policies, (
Iterable, storage_policy.StoragePolicyCollection)):
return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)
if legacy_only:
default_policies = [
StoragePolicy(0, name='legacy', is_default=True),
]
default_ring_args = [{}]
elif with_ec_default:
default_policies = [
ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{'replicas': 14}, {}]
else:
default_policies = [
StoragePolicy(0, name='nulo', is_default=True),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{}, {}]
fake_ring_args = fake_ring_args or default_ring_args
decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
if not thing_or_policies:
return decorator
else:
# it's a thing, we return the wrapped thing instead of the decorator
return decorator(thing_or_policies)
class PatchPolicies(object):
"""
Why not mock.patch? In my case, when used as a decorator on the class it
seemed to patch setUp at the wrong time (i.e. in setup the global wasn't
patched yet)
"""
def __init__(self, policies, fake_ring_args=None):
if isinstance(policies, storage_policy.StoragePolicyCollection):
self.policies = policies
else:
self.policies = storage_policy.StoragePolicyCollection(policies)
self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
def _setup_rings(self):
"""
Our tests tend to use the policies rings like their own personal
playground - which can be a problem in the particular case of a
patched TestCase class where the FakeRing objects are scoped in the
call to the patch_policies wrapper outside of the TestCase instance
which can lead to some bled state.
To help tests get better isolation without having to think about it,
here we're capturing the args required to *build* a new FakeRing
instances so we can ensure each test method gets a clean ring setup.
The TestCase can always "tweak" these fresh rings in setUp - or if
they'd prefer to get the same "reset" behavior with custom FakeRing's
they can pass in their own fake_ring_args to patch_policies instead of
setting the object_ring on the policy definitions.
"""
for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
if fake_ring_arg is not None:
policy.object_ring = FakeRing(**fake_ring_arg)
def __call__(self, thing):
if isinstance(thing, type):
return self._patch_class(thing)
else:
return self._patch_method(thing)
def _patch_class(self, cls):
"""
Creating a new class that inherits from decorated class is the more
common way I've seen class decorators done - but it seems to cause
infinite recursion when super is called from inside methods in the
decorated class.
"""
orig_setUp = cls.setUp
orig_tearDown = cls.tearDown
def setUp(cls_self):
self._orig_POLICIES = storage_policy._POLICIES
if not getattr(cls_self, '_policies_patched', False):
storage_policy._POLICIES = self.policies
self._setup_rings()
cls_self._policies_patched = True
orig_setUp(cls_self)
def tearDown(cls_self):
orig_tearDown(cls_self)
storage_policy._POLICIES = self._orig_POLICIES
cls.setUp = setUp
cls.tearDown = tearDown
return cls
def _patch_method(self, f):
@functools.wraps(f)
def mywrapper(*args, **kwargs):
self._orig_POLICIES = storage_policy._POLICIES
try:
storage_policy._POLICIES = self.policies
self._setup_rings()
return f(*args, **kwargs)
finally:
storage_policy._POLICIES = self._orig_POLICIES
return mywrapper
def __enter__(self):
self._orig_POLICIES = storage_policy._POLICIES
storage_policy._POLICIES = self.policies
def __exit__(self, *args):
storage_policy._POLICIES = self._orig_POLICIES
class FakeRing(Ring):
def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
base_port=1000):
"""
:param part_power: make part calculation based on the path
If you set a part_power when you setup your FakeRing the parts you get
out of ring methods will actually be based on the path - otherwise we
exercise the real ring code, but ignore the result and return 1.
"""
self._base_port = base_port
self.max_more_nodes = max_more_nodes
self._part_shift = 32 - part_power
# 9 total nodes (6 more past the initial 3) is the cap, no matter if
# this is set higher, or R^2 for R replicas
self.set_replicas(replicas)
self._reload()
def _reload(self):
self._rtime = time.time()
def set_replicas(self, replicas):
self.replicas = replicas
self._devs = []
for x in range(self.replicas):
ip = '10.0.0.%s' % x
port = self._base_port + x
self._devs.append({
'ip': ip,
'replication_ip': ip,
'port': port,
'replication_port': port,
'device': 'sd' + (chr(ord('a') + x)),
'zone': x % 3,
'region': x % 2,
'id': x,
})
@property
def replica_count(self):
return self.replicas
def _get_part_nodes(self, part):
return [dict(node, index=i) for i, node in enumerate(list(self._devs))]
def get_more_nodes(self, part):
for x in range(self.replicas, (self.replicas + self.max_more_nodes)):
yield {'ip': '10.0.0.%s' % x,
'replication_ip': '10.0.0.%s' % x,
'port': self._base_port + x,
'replication_port': self._base_port + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x}
def write_fake_ring(path, *devs):
"""
Pretty much just a two node, two replica, 2 part power ring...
"""
dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6000}
dev2 = {'id': 1, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6000}
dev1_updates, dev2_updates = devs or ({}, {})
dev1.update(dev1_updates)
dev2.update(dev2_updates)
replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
devs = [dev1, dev2]
part_shift = 30
with closing(GzipFile(path, 'wb')) as f:
pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
class FabricatedRing(Ring):
"""
When a FakeRing just won't do - you can fabricate one to meet
your test's needs.
"""
def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
part_power=4):
self.devices = devices
self.nodes = nodes
self.port = port
self.replicas = replicas
self.part_power = part_power
self._part_shift = 32 - self.part_power
self._reload()
def _reload(self, *args, **kwargs):
self._rtime = time.time() * 2
if hasattr(self, '_replica2part2dev_id'):
return
self._devs = [{
'region': 1,
'zone': 1,
'weight': 1.0,
'id': i,
'device': 'sda%d' % i,
'ip': '10.0.0.%d' % (i % self.nodes),
'replication_ip': '10.0.0.%d' % (i % self.nodes),
'port': self.port,
'replication_port': self.port,
} for i in range(self.devices)]
self._replica2part2dev_id = [
[None] * 2 ** self.part_power
for i in range(self.replicas)
]
dev_ids = itertools.cycle(range(self.devices))
for p in range(2 ** self.part_power):
for r in range(self.replicas):
self._replica2part2dev_id[r][p] = next(dev_ids)
class FakeMemcache(object):
def __init__(self):
self.store = {}
def get(self, key):
return self.store.get(key)
def keys(self):
return self.store.keys()
def set(self, key, value, time=0):
self.store[key] = value
return True
def incr(self, key, time=0):
self.store[key] = self.store.setdefault(key, 0) + 1
return self.store[key]
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
yield True
def delete(self, key):
try:
del self.store[key]
except Exception:
pass
return True
def readuntil2crlfs(fd):
rv = ''
lc = ''
crlfs = 0
while crlfs < 2:
c = fd.read(1)
if not c:
raise ValueError("didn't get two CRLFs; just got %r" % rv)
rv = rv + c
if c == '\r' and lc != '\n':
crlfs = 0
if lc == '\r' and c == '\n':
crlfs += 1
lc = c
return rv
def connect_tcp(hostport):
rv = socket.socket()
rv.connect(hostport)
return rv
@contextmanager
def tmpfile(content):
with NamedTemporaryFile('w', delete=False) as f:
file_name = f.name
f.write(str(content))
try:
yield file_name
finally:
os.unlink(file_name)
xattr_data = {}
def _get_inode(fd):
if not isinstance(fd, int):
try:
fd = fd.fileno()
except AttributeError:
return os.stat(fd).st_ino
return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
inode = _get_inode(fd)
data = xattr_data.get(inode, {})
data[k] = v
xattr_data[inode] = data
def _getxattr(fd, k):
inode = _get_inode(fd)
data = xattr_data.get(inode, {}).get(k)
if not data:
raise IOError(errno.ENODATA, "Fake IOError")
return data
import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
@contextmanager
def temptree(files, contents=''):
# generate enough contents to fill the files
c = len(files)
contents = (list(contents) + [''] * c)[:c]
tempdir = mkdtemp()
for path, content in zip(files, contents):
if os.path.isabs(path):
path = '.' + path
new_path = os.path.join(tempdir, path)
subdir = os.path.dirname(new_path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(new_path, 'w') as f:
f.write(str(content))
try:
yield tempdir
finally:
rmtree(tempdir)
def with_tempdir(f):
"""
Decorator to give a single test a tempdir as argument to test method.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
tempdir = mkdtemp()
args = list(args)
args.append(tempdir)
try:
return f(*args, **kwargs)
finally:
rmtree(tempdir)
return wrapped
class NullLoggingHandler(logging.Handler):
def emit(self, record):
pass
class UnmockTimeModule(object):
"""
Even if a test mocks time.time - you can restore unmolested behavior in
another module that imports time directly, by monkey patching its
imported reference to the time module with an instance of this class.
"""
_orig_time = time.time
def __getattribute__(self, name):
if name == 'time':
return UnmockTimeModule._orig_time
return getattr(time, name)
# logging.LogRecord.__init__ calls time.time
logging.time = UnmockTimeModule()
class FakeLogger(logging.Logger, object):
# a thread safe fake logger
def __init__(self, *args, **kwargs):
self._clear()
self.name = 'swift.unit.fake_logger'
self.level = logging.NOTSET
if 'facility' in kwargs:
self.facility = kwargs['facility']
self.statsd_client = None
self.thread_locals = None
self.parent = None
store_in = {
logging.ERROR: 'error',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'debug',
logging.CRITICAL: 'critical',
NOTICE: 'notice',
}
def notice(self, msg, *args, **kwargs):
"""
Convenience function for syslog priority LOG_NOTICE. The Python
logging level is set to 25, just above INFO. SysLogHandler is
monkey patched to map this log level to the LOG_NOTICE syslog
priority.
"""
self.log(NOTICE, msg, *args, **kwargs)
def _log(self, level, msg, *args, **kwargs):
store_name = self.store_in[level]
cargs = [msg]
if any(args):
cargs.extend(args)
captured = dict(kwargs)
if 'exc_info' in kwargs and \
not isinstance(kwargs['exc_info'], tuple):
captured['exc_info'] = sys.exc_info()
self.log_dict[store_name].append((tuple(cargs), captured))
super(FakeLogger, self)._log(level, msg, *args, **kwargs)
def _clear(self):
self.log_dict = defaultdict(list)
self.lines_dict = {'critical': [], 'error': [], 'info': [],
'warning': [], 'debug': [], 'notice': []}
clear = _clear # this is a public interface
def get_lines_for_level(self, level):
if level not in self.lines_dict:
raise KeyError(
"Invalid log level '%s'; valid levels are %s" %
(level,
', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
return self.lines_dict[level]
def all_log_lines(self):
return dict((level, msgs) for level, msgs in self.lines_dict.items()
if len(msgs) > 0)
def _store_in(store_name):
def stub_fn(self, *args, **kwargs):
self.log_dict[store_name].append((args, kwargs))
return stub_fn
# mock out the StatsD logging methods:
update_stats = _store_in('update_stats')
increment = _store_in('increment')
decrement = _store_in('decrement')
timing = _store_in('timing')
timing_since = _store_in('timing_since')
transfer_rate = _store_in('transfer_rate')
set_statsd_prefix = _store_in('set_statsd_prefix')
def get_increments(self):
return [call[0][0] for call in self.log_dict['increment']]
def get_increment_counts(self):
counts = {}
for metric in self.get_increments():
if metric not in counts:
counts[metric] = 0
counts[metric] += 1
return counts
def setFormatter(self, obj):
self.formatter = obj
def close(self):
self._clear()
def set_name(self, name):
# don't touch _handlers
self._name = name
def acquire(self):
pass
def release(self):
pass
def createLock(self):
pass
def emit(self, record):
pass
def _handle(self, record):
try:
line = record.getMessage()
except TypeError:
print('WARNING: unable to format log message %r %% %r' % (
record.msg, record.args))
raise
self.lines_dict[record.levelname.lower()].append(line)
def handle(self, record):
self._handle(record)
def flush(self):
pass
def handleError(self, record):
pass
class DebugLogger(FakeLogger):
"""A simple stdout logging version of FakeLogger"""
def __init__(self, *args, **kwargs):
FakeLogger.__init__(self, *args, **kwargs)
self.formatter = logging.Formatter(
"%(server)s %(levelname)s: %(message)s")
def handle(self, record):
self._handle(record)
print(self.formatter.format(record))
class DebugLogAdapter(utils.LogAdapter):
def _send_to_logger(name):
def stub_fn(self, *args, **kwargs):
return getattr(self.logger, name)(*args, **kwargs)
return stub_fn
# delegate to FakeLogger's mocks
update_stats = _send_to_logger('update_stats')
increment = _send_to_logger('increment')
decrement = _send_to_logger('decrement')
timing = _send_to_logger('timing')
timing_since = _send_to_logger('timing_since')
transfer_rate = _send_to_logger('transfer_rate')
set_statsd_prefix = _send_to_logger('set_statsd_prefix')
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
"""get a named adapted debug logger"""
return DebugLogAdapter(DebugLogger(), name)
original_syslog_handler = logging.handlers.SysLogHandler
def fake_syslog_handler():
for attr in dir(original_syslog_handler):
if attr.startswith('LOG'):
setattr(FakeLogger, attr,
copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
FakeLogger.priority_map = \
copy.deepcopy(logging.handlers.SysLogHandler.priority_map)
logging.handlers.SysLogHandler = FakeLogger
if utils.config_true_value(
get_config('unit_test').get('fake_syslog', 'False')):
fake_syslog_handler()
class MockTrue(object):
"""
Instances of MockTrue evaluate like True.
Any attr accessed on an instance of MockTrue will return a MockTrue
instance. Any method called on an instance of MockTrue will return
a MockTrue instance.
>>> thing = MockTrue()
>>> thing
True
>>> thing == True # True == True
True
>>> thing == False # True == False
False
>>> thing != True # True != True
False
>>> thing != False # True != False
True
>>> thing.attribute
True
>>> thing.method()
True
>>> thing.attribute.method()
True
>>> thing.method().attribute
True
"""
def __getattribute__(self, *args, **kwargs):
return self
def __call__(self, *args, **kwargs):
return self
def __repr__(*args, **kwargs):
return repr(True)
def __eq__(self, other):
return other is True
def __ne__(self, other):
return other is not True
@contextmanager
def mock(update):
returns = []
deletes = []
for key, value in update.items():
imports = key.split('.')
attr = imports.pop(-1)
module = __import__(imports[0], fromlist=imports[1:])
for modname in imports[1:]:
module = getattr(module, modname)
if hasattr(module, attr):
returns.append((module, attr, getattr(module, attr)))
else:
deletes.append((module, attr))
setattr(module, attr, value)
try:
yield True
finally:
for module, attr, value in returns:
setattr(module, attr, value)
for module, attr in deletes:
delattr(module, attr)
class FakeStatus(object):
"""
This will work with our fake_http_connect: if you hand in one of these
instead of a status int (or status int tuple) to the "codes" iter, you
can add some eventlet sleep to the expect and response stages of the
connection.
"""
def __init__(self, status, expect_sleep=None, response_sleep=None):
"""
:param status: the response status int, or a tuple of
([expect_status, ...], response_status)
:param expect_sleep: float, time to eventlet sleep during expect; can
    be an iter of floats
:param response_sleep: float, time to eventlet sleep during response
"""
# connect exception
if isinstance(status, (Exception, eventlet.Timeout)):
raise status
if isinstance(status, tuple):
self.expect_status = list(status[:-1])
self.status = status[-1]
self.explicit_expect_list = True
else:
self.expect_status, self.status = ([], status)
self.explicit_expect_list = False
if not self.expect_status:
# when a swift backend service returns a status before reading
# from the body (mostly an error response) eventlet.wsgi will
# respond with that status line immediately instead of 100
# Continue, even if the client sent the Expect 100 header.
# BufferedHttp and the proxy both see these error statuses
# when they call getexpect, so our FakeConn tries to act like
# our backend services and return certain types of responses
# as expect statuses just like a real backend server would do.
if self.status in (507, 412, 409):
self.expect_status = [status]
else:
self.expect_status = [100, 100]
# setup sleep attributes
if not isinstance(expect_sleep, (list, tuple)):
expect_sleep = [expect_sleep] * len(self.expect_status)
self.expect_sleep_list = list(expect_sleep)
while len(self.expect_sleep_list) < len(self.expect_status):
self.expect_sleep_list.append(None)
self.response_sleep = response_sleep
def get_response_status(self):
if self.response_sleep is not None:
eventlet.sleep(self.response_sleep)
if self.expect_status and self.explicit_expect_list:
raise Exception('Test did not consume all fake '
'expect status: %r' % (self.expect_status,))
if isinstance(self.status, (Exception, eventlet.Timeout)):
raise self.status
return self.status
def get_expect_status(self):
expect_sleep = self.expect_sleep_list.pop(0)
if expect_sleep is not None:
eventlet.sleep(expect_sleep)
expect_status = self.expect_status.pop(0)
if isinstance(expect_status, (Exception, eventlet.Timeout)):
raise expect_status
return expect_status
class SlowBody(object):
"""
This will work with our fake_http_connect: if you hand in these
instead of strings, it will make reads take longer by the given
amount. It should be a little bit easier to extend than the
current slow kwarg - which inserts whitespace in the response.
Also, it should be easy to detect if you have one of these (or a
subclass) for the body inside of FakeConn, if we wanted to do
something smarter than just duck-typing the str/buffer API
enough to get by.
"""
def __init__(self, body, slowness):
self.body = body
self.slowness = slowness
def slowdown(self):
eventlet.sleep(self.slowness)
def __getitem__(self, s):
return SlowBody(self.body[s], self.slowness)
def __len__(self):
return len(self.body)
def __radd__(self, other):
self.slowdown()
return other + self.body
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
def __init__(self, status, etag=None, body='', timestamp='1',
headers=None, expect_headers=None, connection_id=None,
give_send=None):
if not isinstance(status, FakeStatus):
status = FakeStatus(status)
self._status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.sent = 0
self.received = 0
self.etag = etag
self.body = body
self.headers = headers or {}
self.expect_headers = expect_headers or {}
self.timestamp = timestamp
self.connection_id = connection_id
self.give_send = give_send
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
try:
self._next_sleep = kwargs['slow'].pop(0)
except IndexError:
self._next_sleep = None
# be nice to trixy bits with node_iter's
eventlet.sleep()
def getresponse(self):
exc = kwargs.get('raise_exc')
if exc:
if isinstance(exc, (Exception, eventlet.Timeout)):
raise exc
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
raise eventlet.Timeout()
self.status = self._status.get_response_status()
return self
def getexpect(self):
expect_status = self._status.get_expect_status()
headers = dict(self.expect_headers)
if expect_status == 409:
headers['X-Backend-Timestamp'] = self.timestamp
response = FakeConn(expect_status,
timestamp=self.timestamp,
headers=headers)
response.status = expect_status
return response
def getheaders(self):
etag = self.etag
if not etag:
if isinstance(self.body, str):
etag = '"' + md5(self.body).hexdigest() + '"'
else:
etag = '"68b329da9893e34099c7d8ad5cb9c940"'
headers = swob.HeaderKeyDict({
'content-length': len(self.body),
'content-type': 'x-application/test',
'x-timestamp': self.timestamp,
'x-backend-timestamp': self.timestamp,
'last-modified': self.timestamp,
'x-object-meta-test': 'testing',
'x-delete-at': '9876543210',
'etag': etag,
'x-works': 'yes',
})
if self.status // 100 == 2:
headers['x-account-container-count'] = \
kwargs.get('count', 12345)
if not self.timestamp:
# when timestamp is None, HeaderKeyDict raises KeyError
headers.pop('x-timestamp', None)
try:
if next(container_ts_iter) is False:
headers['x-container-timestamp'] = '1'
except StopIteration:
pass
am_slow, value = self.get_slow()
if am_slow:
headers['content-length'] = '4'
headers.update(self.headers)
return headers.items()
def get_slow(self):
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
if self._next_sleep is not None:
return True, self._next_sleep
else:
return False, 0.01
if kwargs.get('slow') and isinstance(kwargs['slow'], Number):
return True, kwargs['slow']
return bool(kwargs.get('slow')), 0.1
def read(self, amt=None):
am_slow, value = self.get_slow()
if am_slow:
if self.sent < 4:
self.sent += 1
eventlet.sleep(value)
return ' '
rv = self.body[:amt]
self.body = self.body[amt:]
return rv
def send(self, amt=None):
if self.give_send:
self.give_send(self.connection_id, amt)
am_slow, value = self.get_slow()
if am_slow:
if self.received < 4:
self.received += 1
eventlet.sleep(value)
def getheader(self, name, default=None):
return swob.HeaderKeyDict(self.getheaders()).get(name, default)
def close(self):
pass
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
if isinstance(kwargs.get('headers'), (list, tuple)):
headers_iter = iter(kwargs['headers'])
else:
headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
if isinstance(kwargs.get('expect_headers'), (list, tuple)):
expect_headers_iter = iter(kwargs['expect_headers'])
else:
expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
conn_id_and_code_iter = enumerate(code_iter)
static_body = kwargs.get('body', None)
body_iter = kwargs.get('body_iter', None)
if body_iter:
body_iter = iter(body_iter)
def connect(*args, **ckwargs):
if kwargs.get('slow_connect', False):
eventlet.sleep(0.1)
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
i, status = next(conn_id_and_code_iter)
if 'give_connect' in kwargs:
give_conn_fn = kwargs['give_connect']
argspec = inspect.getargspec(give_conn_fn)
if argspec.keywords or 'connection_id' in argspec.args:
ckwargs['connection_id'] = i
give_conn_fn(*args, **ckwargs)
etag = next(etag_iter)
headers = next(headers_iter)
expect_headers = next(expect_headers_iter)
timestamp = next(timestamps_iter)
if status <= 0:
raise HTTPException()
if body_iter is None:
body = static_body or ''
else:
body = next(body_iter)
return FakeConn(status, etag, body=body, timestamp=timestamp,
headers=headers, expect_headers=expect_headers,
connection_id=i, give_send=kwargs.get('give_send'))
connect.code_iter = code_iter
return connect
@contextmanager
def mocked_http_conn(*args, **kwargs):
requests = []
def capture_requests(ip, port, method, path, headers, qs, ssl):
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
requests.append(req)
kwargs.setdefault('give_connect', capture_requests)
fake_conn = fake_http_connect(*args, **kwargs)
fake_conn.requests = requests
with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
left_over_status = list(fake_conn.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
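# Hedged usage sketch (statuses and the function under test are
# illustrative): every status handed in must be consumed, or the context
# manager raises on exit.
#
#     with mocked_http_conn(200, 404) as fake_conn:
#         do_two_backend_requests()
#     self.assertEqual(2, len(fake_conn.requests))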
def make_timestamp_iter():
return iter(Timestamp(t) for t in itertools.count(int(time.time())))
| 32.104306
| 79
| 0.592924
| 4,079
| 33,549
| 4.703849
| 0.177004
| 0.013759
| 0.007505
| 0.007818
| 0.147965
| 0.092458
| 0.068171
| 0.04232
| 0.026372
| 0.016782
| 0
| 0.011366
| 0.307669
| 33,549
| 1,044
| 80
| 32.135057
| 0.814698
| 0.1561
| 0
| 0.182442
| 0
| 0
| 0.063651
| 0.005997
| 0
| 0
| 0
| 0
| 0.001372
| 1
| 0.128944
| false
| 0.013717
| 0.049383
| 0.021948
| 0.29904
| 0.004115
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab3a62e50f821717cc617bcae69096621bae1d3
| 10,138
|
py
|
Python
|
fairseq/models/bart/model.py
|
samsontmr/fairseq
|
1d50b6dcd961faaa74ee32e9d7a02ff76f16ab87
|
[
"MIT"
] | 172
|
2019-08-22T14:20:25.000Z
|
2022-02-16T07:38:12.000Z
|
fairseq/models/bart/model.py
|
samsontmr/fairseq
|
1d50b6dcd961faaa74ee32e9d7a02ff76f16ab87
|
[
"MIT"
] | 3
|
2019-08-30T11:56:15.000Z
|
2020-10-02T13:57:49.000Z
|
fairseq/models/bart/model.py
|
samsontmr/fairseq
|
1d50b6dcd961faaa74ee32e9d7a02ff76f16ab87
|
[
"MIT"
] | 8
|
2019-10-15T04:36:43.000Z
|
2020-10-21T01:50:09.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
@register_model('bart')
class BARTModel(TransformerModel):
@classmethod
def hub_models(cls):
return {
'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz',
'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz',
}
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
super(BARTModel, BARTModel).add_args(parser)
parser.add_argument(
'--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence'
)
parser.add_argument(
'--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence'
)
parser.add_argument(
'--pooler-dropout', type=float, metavar='D',
help='dropout probability in the masked_lm pooler layers'
)
parser.add_argument(
'--pooler-activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use for pooler layer'
)
@property
def supported_targets(self):
return {'self'}
def forward(
self, src_tokens, src_lengths, prev_output_tokens,
features_only=False, classification_head_name=None, **kwargs
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
**kwargs,
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
**kwargs,
)
if classification_head_name is not None:
sentence_representation = x[
src_tokens.eq(self.encoder.dictionary.eos()), :
].view(x.size(0), -1, x.size(-1))[:, -1, :]
x = self.classification_heads[classification_head_name](
sentence_representation
)
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
bpe='gpt2',
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return BARTHubInterface(x['args'], x['task'], x['models'][0])
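# Hedged usage sketch (the model name comes from hub_models() above):
#
#     bart = BARTModel.from_pretrained('bart.large.mnli')
#
# The returned BARTHubInterface wraps args, task and the first model, so
# downstream inference goes through that interface rather than this class.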
def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):
"""Register a classification head."""
print("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
print(
'WARNING: re-registering head "{}" with num_classes {} (prev: {}) '
'and inner_dim {} (prev: {})'.format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + '.' if name != '' else ''
current_head_names = [] if not hasattr(self, 'classification_heads') else \
self.classification_heads.keys()
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + 'classification_heads.'):
continue
head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0)
inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0)
if getattr(self.args, 'load_checkpoint_heads', False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
print(
'WARNING: deleting classification head ({}) from checkpoint '
'not present in current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes != self.classification_heads[head_name].out_proj.out_features
or inner_dim != self.classification_heads[head_name].dense.out_features
):
print(
'WARNING: deleting classification head ({}) from checkpoint '
'with different dimensions than current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, 'classification_heads'):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + 'classification_heads.' + k not in state_dict:
print('Overwriting', prefix + 'classification_heads.' + k)
state_dict[prefix + 'classification_heads.' + k] = v
class BARTClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@register_model_architecture('bart', 'bart_large')
def bart_large_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024)
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.relu_dropout = getattr(args, 'relu_dropout', 0.)
args.dropout = getattr(args, 'dropout', 0.1)
args.max_target_positions = getattr(args, 'max_target_positions', 1024)
args.max_source_positions = getattr(args, 'max_source_positions', 1024)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, 'no_scale_embedding', True)
args.layernorm_embedding = getattr(args, 'layernorm_embedding', True)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
| 41.044534
| 111
| 0.641251
| 1,187
| 10,138
| 5.192923
| 0.20219
| 0.053537
| 0.044776
| 0.015412
| 0.232966
| 0.15623
| 0.115509
| 0.072356
| 0.034718
| 0.034718
| 0
| 0.00666
| 0.25942
| 10,138
| 246
| 112
| 41.211382
| 0.814331
| 0.054843
| 0
| 0.154229
| 0
| 0.004975
| 0.169195
| 0.046981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054726
| false
| 0
| 0.034826
| 0.00995
| 0.124378
| 0.024876
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab4adb92969b15eb2c974889f086f66aef842c0
| 3,351
|
py
|
Python
|
tb_plugin/torch_tb_profiler/profiler/trace.py
|
azhou-determined/kineto
|
46ed0ce917c1515db29c39cd87b0c5430f5be94e
|
[
"BSD-3-Clause"
] | null | null | null |
tb_plugin/torch_tb_profiler/profiler/trace.py
|
azhou-determined/kineto
|
46ed0ce917c1515db29c39cd87b0c5430f5be94e
|
[
"BSD-3-Clause"
] | null | null | null |
tb_plugin/torch_tb_profiler/profiler/trace.py
|
azhou-determined/kineto
|
46ed0ce917c1515db29c39cd87b0c5430f5be94e
|
[
"BSD-3-Clause"
] | 2
|
2021-08-12T08:00:41.000Z
|
2021-08-20T03:41:03.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from enum import IntEnum
from .. import utils
__all__ = ["EventTypes", "create_event"]
logger = utils.get_logger()
DeviceType = IntEnum('DeviceType', ['CPU', 'CUDA'], start=0)
class EventTypes(object):
TRACE = "Trace"
OPERATOR = "Operator"
PROFILER_STEP = "ProfilerStep"
RUNTIME = "Runtime"
KERNEL = "Kernel"
MEMCPY = "Memcpy"
MEMSET = "Memset"
PYTHON = "Python"
MEMORY = "Memory"
Supported_EventTypes = [v for k, v in vars(EventTypes).items() if not k.startswith("_") and v != EventTypes.PROFILER_STEP]
class BaseEvent(object):
def __init__(self, type, data):
self.type = type
self.name = data.get("name")
self.ts = data.get("ts")
self.pid = data.get("pid")
self.tid = data.get("tid")
self.args = data.get("args", {})
class TraceEvent(BaseEvent):
def __init__(self, type, data):
super().__init__(type, data)
self.category = data.get("cat", "")
self.duration = data.get("dur")
@property
def external_id(self):
extern_id = self.args.get("external id")
if extern_id is None:
extern_id = self.args.get("External id")
return extern_id
@property
def callstack(self):
return self.args.get("Call stack", "")
@property
def input_shape(self):
shape = self.args.get("Input Dims")
if shape is None:
shape = self.args.get("Input dims")
return shape
@property
def input_type(self):
return self.args.get("Input type")
class ProfilerStepEvent(TraceEvent):
def __init__(self, data):
super().__init__(EventTypes.PROFILER_STEP, data)
# torch.profiler.profile.step will invoke record_function with name like "ProfilerStep#5"
self.step = int(self.name.split("#")[1])
class MemoryEvent(BaseEvent):
def __init__(self, type, data):
super().__init__(type, data)
self.scope = data.get("s", "")
@property
def device_type(self):
dtype = self.args.get("Device Type")
if dtype is None:
return None
try:
return DeviceType(dtype)
except ValueError:
return None
@property
def device_id(self):
return self.args.get("Device Id")
@property
def bytes(self):
return self.args.get("Bytes", 0)
def create_event(event):
try:
type = event.get("ph")
if type == "X":
return create_trace_event(event)
elif type == "i" and event.get('s') == 't':
return MemoryEvent(EventTypes.MEMORY, event)
else:
return None
except Exception as ex:
logger.warning("Failed to parse profile event. Exception=%s. Event=%s", ex, event, exc_info=True)
raise
def create_trace_event(event):
category = event.get("cat")
if category == "Operator":
name = event.get("name")
if name and name.startswith("ProfilerStep#"):
return ProfilerStepEvent(event)
if category in Supported_EventTypes:
return TraceEvent(category, event)
else:
return None
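# Hedged example of the dispatch above (field values are illustrative):
#
#     evt = create_event({"ph": "X", "cat": "Kernel", "name": "void kern()",
#                         "ts": 100, "dur": 5, "pid": 0, "tid": 1, "args": {}})
#     # -> TraceEvent with type "Kernel"
#
#     mem = create_event({"ph": "i", "s": "t", "name": "[memory]", "ts": 100,
#                         "pid": 0, "tid": 1,
#                         "args": {"Device Type": 1, "Bytes": 1024}})
#     # -> MemoryEvent; device_type is DeviceType.CUDA and bytes == 1024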
| 27.925
| 122
| 0.578036
| 386
| 3,351
| 4.873057
| 0.297927
| 0.042531
| 0.052632
| 0.038278
| 0.164274
| 0.109516
| 0.082935
| 0.0521
| 0.0521
| 0.0521
| 0
| 0.001608
| 0.257535
| 3,351
| 119
| 123
| 28.159664
| 0.754421
| 0.087437
| 0
| 0.217391
| 0
| 0
| 0.098296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.141304
| false
| 0
| 0.021739
| 0.043478
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab57006541d451df413f174cce8e16652508bf7
| 1,399
|
py
|
Python
|
indexclient/parsers/info.py
|
uc-cdis/indexclient
|
5d61bdb2cb9c0104f173d7bba43d92449a093c6d
|
[
"Apache-2.0"
] | 2
|
2020-02-19T15:52:13.000Z
|
2021-10-30T12:06:22.000Z
|
indexclient/parsers/info.py
|
uc-cdis/indexclient
|
5d61bdb2cb9c0104f173d7bba43d92449a093c6d
|
[
"Apache-2.0"
] | 20
|
2017-11-30T18:15:53.000Z
|
2021-08-20T16:14:17.000Z
|
indexclient/parsers/info.py
|
uc-cdis/indexclient
|
5d61bdb2cb9c0104f173d7bba43d92449a093c6d
|
[
"Apache-2.0"
] | 1
|
2019-01-31T21:07:50.000Z
|
2019-01-31T21:07:50.000Z
|
import sys
import json
import logging
import argparse
import warnings
import requests
from indexclient import errors
# DEPRECATED 11/2019 -- interacts with old `/alias/` endpoint.
# For creating aliases for indexd records, prefer using
# the `add_alias` function, which interacts with the new
# `/index/{GUID}/aliases` endpoint.
def info(host, port, name, **kwargs):
"""
Retrieve info by name.
"""
warnings.warn(
(
"This function is deprecated. For creating aliases for indexd "
"records, prefer using the `add_alias_for_did` function, which "
"interacts with the new `/index/{GUID}/aliases` endpoint."
),
DeprecationWarning,
)
resource = "http://{host}:{port}/alias/{name}".format(
host=host, port=port, name=name
)
res = requests.get(resource)
try:
res.raise_for_status()
except Exception as err:
raise errors.BaseIndexError(res.status_code, res.text)
try:
doc = res.json()
except ValueError as err:
reason = json.dumps({"error": "invalid json payload returned"})
raise errors.BaseIndexError(res.status_code, reason)
sys.stdout.write(json.dumps(doc))
def config(parser):
"""
Configure the info command.
"""
parser.set_defaults(func=info)
parser.add_argument("name", help="name of information to retrieve")
| 25.436364
| 76
| 0.653324
| 170
| 1,399
| 5.317647
| 0.482353
| 0.043142
| 0.039823
| 0.04646
| 0.331858
| 0.331858
| 0.247788
| 0.247788
| 0.247788
| 0.247788
| 0
| 0.005613
| 0.235883
| 1,399
| 54
| 77
| 25.907407
| 0.840037
| 0.182273
| 0
| 0.060606
| 0
| 0
| 0.253153
| 0.020721
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.212121
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab5f637fa16cf262bdd8660fa9b73c3bc7980b2
| 1,151
|
py
|
Python
|
email-worker-compose/app/sender.py
|
guilhermebc/docker-playground
|
e614c314ed2f5ab54835a8c45b4b3eec1ac4c57b
|
[
"MIT"
] | 1
|
2019-08-31T11:03:33.000Z
|
2019-08-31T11:03:33.000Z
|
email-worker-compose/app/sender.py
|
guilhermebc/docker-playground
|
e614c314ed2f5ab54835a8c45b4b3eec1ac4c57b
|
[
"MIT"
] | 6
|
2020-09-07T03:12:42.000Z
|
2022-03-02T05:25:57.000Z
|
email-worker-compose/app/sender.py
|
guilhermebc/docker-playground
|
e614c314ed2f5ab54835a8c45b4b3eec1ac4c57b
|
[
"MIT"
] | null | null | null |
import psycopg2
import redis
import json
from bottle import Bottle, request
class Sender(Bottle):
def __init__(self):
super().__init__()
self.route('/', method='POST', callback=self.send)
self.fila = redis.StrictRedis(host='queue', port=6379, db=0)
DSN = 'dbname=email_sender user=postgress host=db'
self.conn = psycopg2.connect(DSN)
def register_message(self, assunto, mensagem):
SQL = 'INSERT INTO emails (assunto, mensagem) VALUES (%s, %s)'
cur = self.conn.cursor()
cur.execute(SQL, (assunto, mensagem))
self.conn.commit()
cur.close()
msg = {'assunto': assunto, 'mensagem': mensagem}
self.fila.rpush('sender', json.dumps(msg))
print('Message registered!')
def send(self):
assunto = request.forms.get('assunto')
mensagem = request.forms.get('mensagem')
self.register_message(assunto, mensagem)
return 'Message queued! Assunto: {} Mensagem: {}'.format(
assunto, mensagem
)
if __name__ == '__main__':
sender = Sender()
sender.run(host='0.0.0.0', port=8080, debug=True)
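# Hedged usage sketch (host/port mirror the run() call above): posting a
# form to the root route registers and queues a message.
#
#     curl -X POST -d "assunto=hello" -d "mensagem=world" http://localhost:8080/
#
# The handler inserts the row into Postgres and pushes the JSON payload onto
# the 'sender' Redis list, presumably for a separate worker to consume.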
| 30.289474
| 70
| 0.617724
| 135
| 1,151
| 5.125926
| 0.488889
| 0.151734
| 0.043353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017162
| 0.24066
| 1,151
| 38
| 71
| 30.289474
| 0.7746
| 0
| 0
| 0
| 0
| 0
| 0.186632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.133333
| 0
| 0.3
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab5f90a0a47ff5a1e41ea47789a04eed55d4b77
| 8,949
|
py
|
Python
|
tests/ximpl.py
|
zsimic/sandbox
|
3d1571ca723d1a5e80ddecae0ad912160334fee9
|
[
"MIT"
] | null | null | null |
tests/ximpl.py
|
zsimic/sandbox
|
3d1571ca723d1a5e80ddecae0ad912160334fee9
|
[
"MIT"
] | null | null | null |
tests/ximpl.py
|
zsimic/sandbox
|
3d1571ca723d1a5e80ddecae0ad912160334fee9
|
[
"MIT"
] | null | null | null |
import click
import poyo
import ruamel.yaml
import runez
import strictyaml
import yaml as pyyaml
from zyaml import load_path, load_string, tokens_from_path, tokens_from_string
from zyaml.marshal import decode, default_marshal, represented_scalar
from . import TestSettings
class ImplementationCollection(object):
def __init__(self, names, default="zyaml,ruamel"):
av = [ZyamlImplementation, RuamelImplementation, PyyamlBaseImplementation, PoyoImplementation, StrictImplementation]
self.available = dict((m.name, m()) for m in av)
self.unknown = []
self.selected = []
if names.startswith("+"):
names = "%s,%s" % (names[1:], default)
names = [s.strip() for s in names.split(",")]
names = [s for s in names if s]
seen = {}
for name in names:
found = 0
for i in self.available.values():
if name == "all" or name in i.name:
if i.name not in seen:
seen[i.name] = True
self.selected.append(i)
found += 1
if found == 0:
self.unknown.append(name)
self.combinations = None
def track_result_combination(self, impl, data):
if isinstance(data, Exception):
value = runez.stringified(data)
else:
value = runez.represented_json(data, stringify=decode, keep_none=True, none_key="-null-")
name = impl.name
if self.combinations is None:
self.combinations = {}
for i1 in self.selected:
for i2 in self.selected:
if i1.name < i2.name:
self.combinations[(i1.name, i2.name)] = set()
for names, values in self.combinations.items():
if name in names:
values.add(value)
def __repr__(self):
return ",".join(str(i) for i in self.selected)
def __len__(self):
return len(self.selected)
def __iter__(self):
for i in self.selected:
yield i
class Implementation(object):
"""Implementation of loading a yml file"""
name = None # type: str
def __repr__(self):
return self.name
@classmethod
def option(cls, default="zyaml,ruamel", count=None, **kwargs):
"""
Args:
default (str | None): Default implementation(s) to use
count (int | None): Optional: exact number of implementations that have to specified
**kwargs: Passed-through to click
"""
kwargs["default"] = default
def _callback(_ctx, _param, value):
if not value:
return None
impls = ImplementationCollection(value, default=default)
if impls.unknown:
raise click.BadParameter("Unknown implementation(s): %s" % ", ".join(impls.unknown))
if count and len(impls) != count:
if count == 1:
raise click.BadParameter("Need exactly 1 implementation")
raise click.BadParameter("Need exactly %s" % runez.plural(count, "implementation"))
if count == 1:
return impls.selected[0]
return impls
metavar = "I1,..."
hlp = "Implementation(s)"
if count:
hlp = runez.plural(count, "implementation")
metavar = ",".join("I%s" % (i + 1) for i in range(count))
kwargs.setdefault("help", "%s to use" % hlp)
kwargs.setdefault("show_default", True)
kwargs.setdefault("metavar", metavar)
name = "implementation" if count == 1 else "implementations"
return click.option(name, "-i", callback=_callback, **kwargs)
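# Hedged usage sketch (command name is illustrative): the classmethod is
# meant to be stacked as a click option on a CLI command.
#
#     @click.command()
#     @Implementation.option(count=1)
#     def show(implementation):
#         ...  # receives the single selected Implementation instance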
def show_result(self, data, tokens=False):
rtype = "tokens" if tokens else data.__class__.__name__ if data is not None else "None"
rep = data
if not tokens or isinstance(data, Exception):
rep = TestSettings.represented(data)
message = "---- %s: %s" % (runez.bold(self.name), runez.dim(rtype))
if isinstance(data, NotImplementedError):
print("%s - %s" % (message, rep))
return
print(message)
print(rep)
def get_outcome(self, content, tokens=False):
if tokens:
data = self.tokens(content)
if isinstance(data, list):
data = "\n".join(self.represented_token(t) for t in data)
return data
return self.deserialized(content)
def deserialized(self, source):
value = TestSettings.protected_call(self._deserialized, source)
return self._simplified(value)
def tokens(self, source):
return TestSettings.protected_call(self._tokenize, source)
def represented_token(self, token):
return str(token)
def _deserialized(self, source):
if hasattr(source, "path"):
return self._deserialized_from_path(source.path)
return self._deserialized_from_string(source)
def _deserialized_from_path(self, path):
with open(path) as fh:
return self._deserialized_from_string(fh.read())
def _deserialized_from_string(self, source):
raise NotImplementedError()
def _tokenize(self, source):
if hasattr(source, "path"):
return self._tokens_from_path(source.path)
return self._tokens_from_string(source)
def _tokens_from_path(self, path):
with open(path) as fh:
return TestSettings.unwrapped(self._tokens_from_string(fh.read()))
def _tokens_from_string(self, source):
raise NotImplementedError()
def _simplified(self, value):
if isinstance(value, list) and len(value) == 1:
return value[0]
return value
class ZyamlImplementation(Implementation):
name = "zyaml"
def _deserialized_from_path(self, path):
return load_path(path)
def _deserialized_from_string(self, source):
return load_string(source)
def _tokens_from_path(self, path):
return tokens_from_path(path)
def _tokens_from_string(self, source):
return tokens_from_string(source)
def _simplified(self, value):
return value
def ruamel_passthrough_tags(loader, tag, node):
name = node.__class__.__name__
if "Seq" in name:
result = []
for v in node.value:
result.append(ruamel_passthrough_tags(loader, tag, v))
return result
if "Map" in name:
result = {}
for k, v in node.value:
k = ruamel_passthrough_tags(loader, tag, k)
v = ruamel_passthrough_tags(loader, tag, v)
result[k] = v
return result
return default_marshal(node.value)
class RuamelImplementation(Implementation):
name = "ruamel"
def _deserialized_from_string(self, source):
y = ruamel.yaml.YAML(typ="safe")
ruamel.yaml.add_multi_constructor("", ruamel_passthrough_tags, Loader=ruamel.yaml.SafeLoader)
return y.load_all(source)
def _tokens_from_string(self, source):
return ruamel.yaml.main.scan(source)
class PyyamlBaseImplementation(Implementation):
name = "pyyaml"
def _deserialized_from_string(self, source):
return pyyaml.load_all(source, Loader=pyyaml.BaseLoader)
def _tokens_from_string(self, source):
yaml_loader = pyyaml.BaseLoader(source)
curr = yaml_loader.get_token()
while curr is not None:
yield curr
curr = yaml_loader.get_token()
def represented_token(self, token):
linenum = token.start_mark.line + 1
column = token.start_mark.column + 1
result = "%s[%s,%s]" % (token.__class__.__name__, linenum, column)
value = getattr(token, "value", None)
if value is not None:
if token.id == "<scalar>":
value = represented_scalar(token.style, value)
elif token.id == "<anchor>":
value = "&%s" % value
elif token.id == "<alias>":
value = "*%s" % value
elif token.id == "<tag>":
assert isinstance(value, tuple)
value = " ".join(str(s) for s in runez.flattened(value))
elif token.id == "<directive>":
result += " %s" % token.name
value = " ".join(str(s) for s in runez.flattened(value))
else:
assert False
result = "%s %s" % (result, value)
return result
class PoyoImplementation(Implementation):
name = "poyo"
def _deserialized_from_string(self, source):
return [poyo.parse_string(source)]
class StrictImplementation(Implementation):
name = "strict"
def _deserialized_from_string(self, source):
obj = strictyaml.dirty_load(source, allow_flow_style=True)
return obj.data
| 30.233108
| 124
| 0.598391
| 1,018
| 8,949
| 5.094303
| 0.193517
| 0.030852
| 0.026996
| 0.038565
| 0.251253
| 0.183186
| 0.110875
| 0.055534
| 0.02931
| 0.02931
| 0
| 0.003341
| 0.297575
| 8,949
| 295
| 125
| 30.335593
| 0.821667
| 0.026707
| 0
| 0.18932
| 0
| 0
| 0.044473
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 1
| 0.160194
| false
| 0.024272
| 0.043689
| 0.063107
| 0.436893
| 0.014563
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab6c4226f6262d47cafb69a5403744916d50994
| 17,464
|
py
|
Python
|
mummi_ras/online/aa/aa_get_tiltrot_z_state.py
|
mummi-framework/mummi-ras
|
7f4522aad36661e4530e39c830ab8c2a6f134060
|
[
"MIT"
] | 4
|
2021-11-16T07:16:36.000Z
|
2022-02-16T23:33:46.000Z
|
mummi_ras/online/aa/aa_get_tiltrot_z_state.py
|
mummi-framework/mummi-ras
|
7f4522aad36661e4530e39c830ab8c2a6f134060
|
[
"MIT"
] | 1
|
2021-11-23T20:23:28.000Z
|
2021-12-03T09:08:34.000Z
|
mummi_ras/online/aa/aa_get_tiltrot_z_state.py
|
mummi-framework/mummi-ras
|
7f4522aad36661e4530e39c830ab8c2a6f134060
|
[
"MIT"
] | 2
|
2021-11-23T19:54:59.000Z
|
2022-02-16T23:32:17.000Z
|
###############################################################################
# @todo add Pilot2-splash-app disclaimer
###############################################################################
""" Get's KRAS states """
import MDAnalysis as mda
from MDAnalysis.analysis import align
from MDAnalysis.lib.mdamath import make_whole
import os
import numpy as np
import math
############## Below section needs to be uncommented ############
import mummi_core
import mummi_ras
from mummi_core.utils import Naming
# # Logger has to be initialized the first thing in the script
from logging import getLogger
LOGGER = getLogger(__name__)
# # Initialize MuMMI if it has not been done before
# MUMMI_ROOT = mummi.init(True)
# This is needed so the Naming works below
# @TODO fix this so we don't have these on import; make them part of an init
mummi_core.init()
dirKRASStates = Naming.dir_res('states')
dirKRASStructures = Naming.dir_res('structures')
# #RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-ONLY.microstates.txt"))
RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-states.txt"),comments='#')
# #RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-RAF.microstates.txt"))
RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-raf-states.txt"),comments='#')  # Note different number of columns, so index changes below
# TODO: CS, my edits to test
# RAS_ONLY_macrostate = np.loadtxt('ras-states.txt')
# RAS_RAF_macrostate = np.loadtxt('ras-raf-states.txt')
############## above section needs to be uncommented ############
# TODO: CS, my edits to test
# TODO: TSC, The reference structure has to currently be set as the 'RAS-ONLY-reference-structure.gro'
# TODO: TSC, path to the reference structure is: mummi_resources/structures/
kras_ref_universe = mda.Universe(os.path.join(dirKRASStructures, "RAS-ONLY-reference-structure.gro"))
# kras_ref_universe = mda.Universe("RAS-ONLY-reference-structure.gro")
# kras_ref_universe = mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro')
# TODO: CS, not using these for x4 proteins; instead using protein_systems below to set num_res
######### Below hard codes the number of residues within RAS-only and RAS-RAF ##########
RAS_only_num_res = 184
RAS_RAF_num_res = 320
######### Above hard codes the number of residues within RAS-only and RAS-RAF ##########
####### This can be removed
# def get_kras(syst, kras_start):
# """Gets all atoms for a KRAS protein starting at 'kras_start'."""
# return syst.atoms[kras_start:kras_start+428]
####### This can be removed
def get_segids(u):
"""Identifies the list of segments within the system. Only needs to be called x1 time"""
segs = u.segments
segs = segs.segids
ras_segids = []
rasraf_segids = []
for i in range(len(segs)):
# print(segs[i])
if segs[i][-3:] == 'RAS':
ras_segids.append(segs[i])
if segs[i][-3:] == 'RAF':
rasraf_segids.append(segs[i])
return ras_segids, rasraf_segids
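# Hedged example (the .gro file name echoes the commented reference above):
# get_segids expects segment ids ending in 'RAS' or 'RAF'.
#
#     u = mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro')
#     ras_segids, rasraf_segids = get_segids(u)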
def get_protein_info(u,tag):
"""Uses the segments identified in get_segids to make a list of all proteins in the systems.\
Outputs a list of the first residue number of the protein, and whether it is 'RAS-ONLY', or 'RAS-RAF'.\
The 'tag' input defines what is used to identify the first residue of the protein. i.e. 'resname ACE1 and name BB'.\
Only needs to be called x1 time"""
ras_segids, rasraf_segids = get_segids(u)
if len(ras_segids) > 0:
RAS = u.select_atoms('segid '+ras_segids[0]+' and '+str(tag))
else:
RAS = []
if len(rasraf_segids) > 0:
RAF = u.select_atoms('segid '+rasraf_segids[0]+' and '+str(tag))
else:
RAF = []
protein_info = []#np.empty([len(RAS)+len(RAF),2])
for i in range(len(RAS)):
protein_info.append((RAS[i].resid,'RAS-ONLY'))
for i in range(len(RAF)):
protein_info.append((RAF[i].resid,'RAS-RAF'))
######## sort protein info
protein_info = sorted(protein_info)
######## sort protein info
return protein_info
def get_ref_kras():
"""Gets the reference KRAS struct. Only called x1 time when class is loaded"""
start_of_g_ref = kras_ref_universe.residues[0].resid
ref_selection = 'resid '+str(start_of_g_ref)+':'+str(start_of_g_ref+24)+' ' +\
str(start_of_g_ref+38)+':'+str(start_of_g_ref+54)+' ' +\
str(start_of_g_ref+67)+':'+str(start_of_g_ref+164)+' ' +\
'and (name CA or name BB)'
r2_26r40_56r69_166_ref = kras_ref_universe.select_atoms(str(ref_selection))
return kras_ref_universe.select_atoms(str(ref_selection)).positions - kras_ref_universe.select_atoms(str(ref_selection)).center_of_mass()
# Load initial ref frames (only need to do this once)
ref0 = get_ref_kras()
def getKRASstates(u,kras_indices):
"""Gets states for all KRAS proteins in path."""
# res_shift = 8
# all_glycine = u.select_atoms("resname GLY")
# kras_indices = []
# for i in range(0, len(all_glycine), 26):
# kras_indices.append(all_glycine[i].index)
########## Below is taken out of the function so it is only done once #########
# kras_indices = get_protein_info(u,'resname ACE1 and name BB')
########## Above is taken out of the function so it is only done once #########
# CS, for x4 cases:
# [{protein_x4: (protein_type, num_res)}]
protein_systems = [{'ras4a': ('RAS-ONLY', 185),
'ras4araf': ('RAS-RAF', 321),
'ras': ('RAS-ONLY', 184),
'rasraf': ('RAS-RAF', 320)}]
ALLOUT = []
for k in range(len(kras_indices)):
start_of_g = kras_indices[k][0]
protein_x4 = str(kras_indices[k][1])
try:
protein_type = [item[protein_x4] for item in protein_systems][0][0] # 'RAS-ONLY' OR 'RAS-RAF'
num_res = [item[protein_x4] for item in protein_systems][0][1]
except:
LOGGER.error('Check KRas naming between modules')
raise Exception('Error: unknown KRas name')
# TODO: CS, replacing this comment section with the above, to handle x4 protein types
# ---------------------------------------
# ALLOUT = []
# for k in range(len(kras_indices)):
# start_of_g = kras_indices[k][0]
# protein_type = str(kras_indices[k][1])
# ########## BELOW SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
# ########## POTENTIALLY REDO WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) #######
# ########## HAS BEEN REDONE WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ########
# # if len(kras_indices) == 1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') ####### HAS TO BE FIXED FOR BACKBONE ATOMS FOR SPECIFIC PROTEIN
# # elif len(kras_indices) > 1:
# # if k == len(kras_indices)-1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB')
# # else:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(kras_indices[k+1][0])+' and name BB')
# ########## ABOVE SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
#
# ########## Below hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# if protein_type == 'RAS-ONLY':
# num_res = RAS_only_num_res
# elif protein_type == 'RAS-RAF':
# num_res = RAS_RAF_num_res
# ########## Above hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# ---------------------------------------
# TODO: TSC, I changed the selection below, which can be used for the make_whole...
# krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)+' and (name CA or name BB)')
krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res))
krases0_BB.guess_bonds()
r2_26r40_56r69_166 = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+24)+' ' +\
str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+\
' and (name CA or name BB)')
u_selection = \
'resid '+str(start_of_g)+':'+str(start_of_g+24)+' '+str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+' and (name CA or name BB)'
mobile0 = u.select_atoms(str(u_selection)).positions - u.select_atoms(str(u_selection)).center_of_mass()
# TODO: CS, something wrong with ref0 from get_kras_ref()
# just making ref0 = mobile0 to test for now
# ref0 = mobile0
# TSC removed this
R, RMSD_junk = align.rotation_matrix(mobile0, ref0)
######## TODO: TSC, Adjusted for AA lipid names ########
# lipids = u.select_atoms('resname POPX POPC PAPC POPE DIPE DPSM PAPS PAP6 CHOL')
lipids = u.select_atoms('resname POPC PAPC POPE DIPE SSM PAPS SAPI CHL1')
coords = ref0
RotMat = []
OS = []
r152_165 = krases0_BB.select_atoms('resid '+str(start_of_g+150)+':'+str(start_of_g+163)+' and (name CA or name BB)')
r65_74 = krases0_BB.select_atoms('resid '+str(start_of_g+63)+':'+str(start_of_g+72)+' and (name CA or name BB)')
timeframes = []
# TODO: CS, for AA need bonds to run make_whole()
# krases0_BB.guess_bonds()
# TODO: CS, turn off for now to test beyond this point
''' *** for AA, need to bring that back on once all else runs ***
'''
# @Tim and Chris S. this was commented out - please check.
#make_whole(krases0_BB)
j, rmsd_junk = mda.analysis.align.rotation_matrix((r2_26r40_56r69_166.positions-r2_26r40_56r69_166.center_of_mass()), coords)
RotMat.append(j)
OS.append(r65_74.center_of_mass()-r152_165.center_of_mass())
timeframes.append(u.trajectory.time)
if protein_type == 'RAS-RAF':
z_pos = []
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES BELOW ####################
############### TODO: TSC, zshifting is set to -1 (instead of -2), as there are ACE caps that are separate residues in AA
#zshifting=-1
if protein_x4 == 'rasraf':
zshifting = -1
elif protein_x4 == 'ras4araf':
zshifting = 0
else:
zshifting = 0
LOGGER.error('Found unsupported protein_x4 type')
raf_loops_selection = u.select_atoms('resid '+str(start_of_g+zshifting+291)+':'+str(start_of_g+zshifting+294)+' ' +\
str(start_of_g+zshifting+278)+':'+str(start_of_g+zshifting+281)+' ' +\
' and (name CA or name BB)')
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES ABOVE ####################
diff = (lipids.center_of_mass()[2]-raf_loops_selection.center_of_mass(unwrap=True)[2])/10
if diff < 0:
diff = diff+(u.dimensions[2]/10)
z_pos.append(diff)
z_pos = np.array(z_pos)
RotMatNP = np.array(RotMat)
OS = np.array(OS)
OA = RotMatNP[:, 2, :]/(((RotMatNP[:, 2, 0]**2)+(RotMatNP[:, 2, 1]**2)+(RotMatNP[:, 2, 2]**2))**0.5)[:, None]
OWAS = np.arccos(RotMatNP[:, 2, 2])*180/math.pi
OC_temp = np.concatenate((OA, OS), axis=1)
t = ((OC_temp[:, 0]*OC_temp[:, 3])+(OC_temp[:, 1]*OC_temp[:, 4]) +
(OC_temp[:, 2]*OC_temp[:, 5]))/((OC_temp[:, 0]**2)+(OC_temp[:, 1]**2)+(OC_temp[:, 2]**2))
OC = OA*t[:, None]
ORS_tp = np.concatenate((OC, OS), axis=1)
ORS_norm = (((ORS_tp[:, 3]-ORS_tp[:, 0])**2)+((ORS_tp[:, 4]-ORS_tp[:, 1])**2)+((ORS_tp[:, 5]-ORS_tp[:, 2])**2))**0.5
ORS = (OS - OC)/ORS_norm[:, None]
OACRS = np.cross(OA, ORS)
OZCA = OA * OA[:, 2][:, None]
Z_unit = np.full([len(OZCA), 3], 1)
Z_adjust = np.array([0, 0, 1])
Z_unit = Z_unit*Z_adjust
Z_OZCA = Z_unit-OZCA
OZPACB = Z_OZCA/((Z_OZCA[:, 0]**2+Z_OZCA[:, 1]**2+Z_OZCA[:, 2]**2)**0.5)[:, None]
OROTNOTSIGNED = np.zeros([len(ORS)])
for i in range(len(ORS)):
# Angle between the in-plane reference vector and ORS; the dot product is
# divided by the product of the two vector norms (both are unit vectors here,
# so the numerical value is unchanged by this fix).
OROTNOTSIGNED[i] = np.arccos(np.dot(OZPACB[i, :], ORS[i, :]) /
(np.sqrt(np.dot(OZPACB[i, :], OZPACB[i, :])) *
np.sqrt(np.dot(ORS[i, :], ORS[i, :]))))*180/math.pi
OZPACBCRS_cross = np.cross(OZPACB, ORS)
OZPACBCRS = OZPACBCRS_cross/((OZPACBCRS_cross[:, 0]**2+OZPACBCRS_cross[:, 1]**2+OZPACBCRS_cross[:, 2]**2)**0.5)[:, None]
OFORSIGN_temp = (OA - OZPACBCRS)**2
OFORSIGN = OFORSIGN_temp[:, 0]+OFORSIGN_temp[:, 1]+OFORSIGN_temp[:, 2]
OROT = OROTNOTSIGNED
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = -(OROT[i])
for i in range(len(OROT)):
if OFORSIGN[i] < 0.25:
OROT[i] = -(OROT[i])
###### Below introduces new shift to account for upper vs. lower leaflet #####
for i in range(len(OWAS)):
OWAS[i] = abs(-(OWAS[i])+180) # made this an absolute value so that the tilt remains positive
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = OROT[i]+180
elif OROT[i] > 0:
OROT[i] = OROT[i]-180
###### Above introduces new shift to account for upper vs. lower leaflet #####
###### Below might have to be updated to take into account the periodic nature of the rotation ######
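# State assignment: each frame is classified by finding the reference
# (rotation, tilt) pair in the macrostate table that is closest in Euclidean
# distance in the OROT/OWAS plane and taking the state label stored with it.
# For RAS-RAF, an extra 'high-z' state (state 3) is assigned first when the
# rotation and z-position criteria below are met.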
if protein_type == 'RAS-ONLY':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
diff0 = []
for i in range(len(RAS_ONLY_macrostate)):
#diff0.append([((RAS_ONLY_macrostate[i,0]-OWAS[j])**2+(RAS_ONLY_macrostate[i,1]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,6]])
diff0.append([((RAS_ONLY_macrostate[i,1]-OWAS[j])**2+(RAS_ONLY_macrostate[i,0]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,5]])
diff0.sort()
states[j] = diff0[0][1]
elif protein_type == 'RAS-RAF':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
### below: adding in the requirements for the 'high-z' state ###
if (OROT[j] < -45 or OROT[j] > 140) and z_pos[j] > 4.8:
states[j] = 3
else:
### above: adding in the requirements for the 'high-z' state ###
diff0 = []
for i in range(len(RAS_RAF_macrostate)):
#diff0.append([((RAS_RAF_macrostate[i,0]-OWAS[j])**2+(RAS_RAF_macrostate[i,1]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,6]])
diff0.append([((RAS_RAF_macrostate[i,1]-OWAS[j])**2+(RAS_RAF_macrostate[i,0]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,4]])
diff0.sort()
states[j] = diff0[0][1]
###### Above might have to be updated to take into account the periodic nature of the rotation ######
###### Assume we want to remove this? Where is the code that reads this information? i.e. will there be knock-on effects? ######
###### If feedback code needs index 5 (two_states) from the output, deleting this four_states will shift that to index 4 #######
# four_states = np.zeros(len(OROT))
# for j in range(len(OROT)):
# diff0 = []
# for i in range(len(macrostate4)):
# diff0.append([((macrostate4[i,0]-OWAS[j])**2+(macrostate4[i,1]-OROT[j])**2)**0.5, macrostate4[i,6]])
# diff0.sort()
# four_states[j] = diff0[0][1]+1
###### below: old output details.... ######################################
###### Updated - RAS-only to NOT HAVE the Z-distance ######################
###### Updated - Added in the protein 'tag', i.e. RAS-ONLY or RAS-RAF #####
# OUTPUT = np.zeros([len(OROT), 6])
# for i in range(len(OROT)):
# OUTPUT[i] = timeframes[i], OWAS[i], OROT[i], z_pos[i], four_states[i], two_states[i]
###### above: old output details.... ######################################
###### below: NEW output details.... ######################################
if protein_type == 'RAS-ONLY':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], 'n/a', int(states[i])
elif protein_type == 'RAS-RAF':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], z_pos[i], int(states[i])
ALLOUT.append(OUTPUT)
return np.asarray(ALLOUT)
#np.savetxt(str(tpr)+"_tilt_rot_z_state.KRAS_"+str(k+1)+".txt", OUTPUT, fmt=['%i','%10.3f','%10.3f','%10.3f','%i','%i'], delimiter=' ')
| 47.072776
| 173
| 0.57129
| 2,476
| 17,464
| 3.874394
| 0.173667
| 0.026269
| 0.030022
| 0.03784
| 0.450328
| 0.377254
| 0.333472
| 0.303346
| 0.279266
| 0.237048
| 0
| 0.030961
| 0.243587
| 17,464
| 370
| 174
| 47.2
| 0.695231
| 0.398477
| 0
| 0.183333
| 0
| 0
| 0.061358
| 0.003379
| 0
| 0
| 0
| 0.008108
| 0
| 1
| 0.022222
| false
| 0
| 0.055556
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab82df482a2f9c96080ef85619210a77bdeb9a0
| 3,676
|
py
|
Python
|
homeassistant/components/switch/hikvisioncam.py
|
maddox/home-assistant
|
6624cfefd6ea81b559085779173b91a3dc6bd349
|
[
"MIT"
] | 1
|
2015-09-13T21:10:09.000Z
|
2015-09-13T21:10:09.000Z
|
homeassistant/components/switch/hikvisioncam.py
|
michaelarnauts/home-assistant
|
7d905e6c0c99a4454de26d63af0581b454f01ca1
|
[
"MIT"
] | null | null | null |
homeassistant/components/switch/hikvisioncam.py
|
michaelarnauts/home-assistant
|
7d905e6c0c99a4454de26d63af0581b454f01ca1
|
[
"MIT"
] | 1
|
2020-05-07T08:48:36.000Z
|
2020-05-07T08:48:36.000Z
|
"""
homeassistant.components.switch.hikvision
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support turning on/off motion detection on Hikvision cameras.
Note: Currently works using default https port only.
CGI API Guide: http://bit.ly/1RuyUuF
Configuration:
To use the Hikvision motion detection switch you will need to add something
like the following to your config/configuration.yaml
switch:
platform: hikvisioncam
name: Hikvision Cam 1 Motion Detection
host: 192.168.1.32
username: YOUR_USERNAME
password: YOUR_PASSWORD
Variables:
host
*Required
This is the IP address of your Hikvision camera. Example: 192.168.1.32
username
*Required
Your Hikvision camera username.
password
*Required
Your Hikvision camera password.
name
*Optional
The name to use when displaying this switch instance.
"""
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
import logging
try:
import hikvision.api
from hikvision.error import HikvisionError, MissingParamError
except ImportError:
# Bind the name so the availability check in setup_platform still works
# when the optional 'hikvision' dependency is missing.
hikvision = None
_LOGGING = logging.getLogger(__name__)
REQUIREMENTS = ['hikvision==0.4']
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Setup Hikvision Camera config. """
host = config.get(CONF_HOST, None)
port = config.get('port', "80")
name = config.get('name', "Hikvision Camera Motion Detection")
username = config.get(CONF_USERNAME, "admin")
password = config.get(CONF_PASSWORD, "12345")
if hikvision is None:
_LOGGING.error((
"Failed to import hikvision. Did you maybe not install the "
"'hikvision' dependency?"))
return False
try:
hikvision_cam = hikvision.api.CreateDevice(
host, port=port, username=username,
password=password, is_https=False)
except MissingParamError as param_err:
_LOGGING.error("Missing required param: %s", param_err)
return False
except HikvisionError as conn_err:
_LOGGING.error("Unable to connect: %s", conn_err)
return False
add_devices_callback([
HikvisionMotionSwitch(name, hikvision_cam)
])
class HikvisionMotionSwitch(ToggleEntity):
""" Provides a switch to toggle on/off motion detection. """
def __init__(self, name, hikvision_cam):
self._name = name
self._hikvision_cam = hikvision_cam
self._state = STATE_OFF
@property
def should_poll(self):
""" Poll for status regularly. """
return True
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the state of the device if any. """
return self._state
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
_LOGGING.info("Turning on Motion Detection ")
self._hikvision_cam.enable_motion_detection()
def turn_off(self, **kwargs):
""" Turn the device off. """
_LOGGING.info("Turning off Motion Detection ")
self._hikvision_cam.disable_motion_detection()
def update(self):
""" Update Motion Detection state """
enabled = self._hikvision_cam.is_motion_detection_enabled()
_LOGGING.info('enabled: %s', enabled)
self._state = STATE_ON if enabled else STATE_OFF
| 27.029412
| 76
| 0.681991
| 447
| 3,676
| 5.454139
| 0.317673
| 0.067678
| 0.026251
| 0.016407
| 0.108285
| 0.021329
| 0.021329
| 0
| 0
| 0
| 0
| 0.010094
| 0.218444
| 3,676
| 135
| 77
| 27.22963
| 0.838496
| 0.322905
| 0
| 0.145161
| 0
| 0
| 0.108409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145161
| false
| 0.048387
| 0.129032
| 0
| 0.403226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab942559c3b1955ee240520738d5bee4d16cc10
| 5,689
|
py
|
Python
|
src/richie/apps/search/filter_definitions/mixins.py
|
leduong/richie
|
bf7ed379b7e2528cd790dadcec10ac2656efd189
|
[
"MIT"
] | null | null | null |
src/richie/apps/search/filter_definitions/mixins.py
|
leduong/richie
|
bf7ed379b7e2528cd790dadcec10ac2656efd189
|
[
"MIT"
] | null | null | null |
src/richie/apps/search/filter_definitions/mixins.py
|
leduong/richie
|
bf7ed379b7e2528cd790dadcec10ac2656efd189
|
[
"MIT"
] | null | null | null |
"""Define mixins to easily compose custom FilterDefinition classes."""
class TermsQueryMixin:
"""A mixin for filter definitions that need to apply term queries."""
def get_query_fragment(self, data):
"""Build the query fragments as term queries for each selected value."""
value_list = data.get(self.name)
# For terms filters, as the name implies, it's a simple terms fragment
return (
[{"key": self.name, "fragment": [{"terms": {self.term: value_list}}]}]
if value_list
else []
)
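# Illustrative example (hypothetical values): with self.name == "languages",
# self.term == "course_runs.languages" and data == {"languages": ["en", "fr"]},
# get_query_fragment returns
# [{"key": "languages",
#   "fragment": [{"terms": {"course_runs.languages": ["en", "fr"]}}]}].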
class ChoicesQueryMixin:
"""A mixin for filter definitions that need to apply predefined queries."""
def get_query_fragment(self, data):
"""Pick the hardcoded query fragment for each selected value."""
fragment_map = self.get_fragment_map()
return [
{"key": self.name, "fragment": fragment_map[value]}
for value in data.get(self.name, [])
]
class ChoicesAggsMixin:
"""A mixin for filter definitions that need to apply aggregations for predefined choices."""
# pylint: disable=unused-argument
def get_aggs_fragment(self, queries, *args, **kwargs):
"""
Build the aggregations as a set of filters, one for each possible value of the field.
"""
return {
# Create a custom aggregation for each possible choice for this filter
# eg `availability@coming_soon` & `availability@current` & `availability@open`
"{:s}@{:s}".format(self.name, choice_key): {
"filter": {
"bool": {
# Use all the query fragments from the queries *but* the one(s) that
# filter on the current filter: we manually add back the only one that
# is relevant to the current choice.
"must": choice_fragment
+ [
clause
for kf_pair in queries
for clause in kf_pair["fragment"]
if kf_pair["key"] is not self.name
]
}
}
}
for choice_key, choice_fragment in self.get_fragment_map().items()
}
class NestedChoicesAggsMixin:
"""
A mixin for filter definitions that are related to a nested field. The aggregation filter can
only be recomputed at the level of the parent because it should group all queries of fields
nested below the parent.
"""
# pylint: disable=unused-argument
def get_aggs_fragment(self, queries, data, parent, *args, **kwargs):
"""
Computing aggregations for a nested field is DIFFICULT because query fragments related to
nested fields are grouped under their common path. For example combined filters on
availability and languages would lead to a query like:
{
"query": {
"nested": {
"path": "course_runs",
"query": {
"bool": {
"must": [
{"range": {"course_runs.end": {"lte": "01-01-2019"}}},
{"terms": {"course_runs.languages": ["de", "en", fr"]}},
]
}
},
}
}
}
In this example, computing the facet count for the French filter, is done with the
following filter (excluding the filter on English and German so we only count French):
{
"query": {
"nested": {
"path": "course_runs",
"query": {
"bool": {
"must": [
{"range": {"course_runs.end": {"lte": "01-01-2019"}}},
{"terms": {"course_runs.languages": ["fr"]}},
]
}
},
}
}
}
This can only be built by calling the parent NestingWrapper with customized filter data.
"""
return {
# Create a custom aggregation for each possible choice for this filter
# eg `availability@coming_soon` & `availability@current` & `availability@open`
"{:s}@{:s}".format(self.name, choice_key): {
"filter": {
"bool": {
# Use all the query fragments from the queries (the nesting parent is
# responsible for excluding the queries related to nested fields so we
# have to manually add them, making sure to apply on the current field
# only the current choice.
"must": [
clause
for kf_pair in (
queries
+ parent.get_query_fragment(
# override data with only the current choice
{**data, self.name: [choice_key]}
)
)
for clause in kf_pair["fragment"]
]
}
}
}
for choice_key, choice_fragment in self.get_fragment_map().items()
}
| 41.830882
| 97
| 0.4746
| 542
| 5,689
| 4.904059
| 0.289668
| 0.024078
| 0.013544
| 0.022573
| 0.434537
| 0.415726
| 0.36757
| 0.341986
| 0.341986
| 0.295711
| 0
| 0.005041
| 0.442081
| 5,689
| 135
| 98
| 42.140741
| 0.832388
| 0.51749
| 0
| 0.296296
| 0
| 0
| 0.039132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab9a4f6d4ca5cd5f443cec5fb87c6e2f96318a3
| 12,830
|
py
|
Python
|
electrumsv/gui/qt/receive_view.py
|
AustEcon/electrumsv
|
db924efc69f091f39e7d02e7f2d7a71350f4e6af
|
[
"MIT"
] | 1
|
2019-07-04T03:35:32.000Z
|
2019-07-04T03:35:32.000Z
|
electrumsv/gui/qt/receive_view.py
|
AustEcon/electrumsv
|
db924efc69f091f39e7d02e7f2d7a71350f4e6af
|
[
"MIT"
] | null | null | null |
electrumsv/gui/qt/receive_view.py
|
AustEcon/electrumsv
|
db924efc69f091f39e7d02e7f2d7a71350f4e6af
|
[
"MIT"
] | null | null | null |
from typing import List, Optional, TYPE_CHECKING
import weakref
from PyQt5.QtCore import QEvent, Qt
from PyQt5.QtWidgets import (QComboBox, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QVBoxLayout, QWidget)
from electrumsv.app_state import app_state
from electrumsv.bitcoin import script_template_to_string
from electrumsv.constants import PaymentFlag, RECEIVING_SUBPATH
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.wallet_database.tables import KeyInstanceRow
from electrumsv import web
from .amountedit import AmountEdit, BTCAmountEdit
from .constants import expiration_values
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .qrcodewidget import QRCodeWidget
from .qrwindow import QR_Window
from .request_list import RequestList
from .table_widgets import TableTopButtonLayout
from .util import ButtonsLineEdit, EnterButton, HelpLabel
class ReceiveView(QWidget):
_qr_window: Optional[QR_Window] = None
def __init__(self, main_window: 'ElectrumWindow', account_id: int) -> None:
super().__init__(main_window)
self._main_window = weakref.proxy(main_window)
self._account_id = account_id
self._account = main_window._wallet.get_account(account_id)
self._logger = logs.get_logger(f"receive-view[{self._account_id}]")
self._receive_key_id: Optional[int] = None
self._request_list_toolbar_layout = TableTopButtonLayout()
self._request_list_toolbar_layout.refresh_signal.connect(
self._main_window.refresh_wallet_display)
self._request_list_toolbar_layout.filter_signal.connect(self._filter_request_list)
form_layout = self.create_form_layout()
self._request_list = RequestList(self, main_window)
request_container = self.create_request_list_container()
vbox = QVBoxLayout(self)
vbox.addLayout(form_layout)
vbox.addSpacing(20)
vbox.addWidget(request_container, 1)
self.setLayout(vbox)
def clean_up(self) -> None:
# If there are no accounts there won't be a receive QR code object created yet.
if self._receive_qr is not None:
self._receive_qr.clean_up()
if self._qr_window is not None:
self._qr_window.close()
def create_form_layout(self) -> QHBoxLayout:
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self._receive_destination_e = ButtonsLineEdit()
self._receive_destination_e.addCopyButton(app_state.app)
self._receive_destination_e.setReadOnly(True)
msg = _('Bitcoin SV payment destination where the payment should be received. '
'Note that each payment request uses a different Bitcoin SV payment destination.')
receive_address_label = HelpLabel(_('Receiving destination'), msg)
self._receive_destination_e.textChanged.connect(self._update_receive_qr)
self._receive_destination_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(receive_address_label, 0, 0)
grid.addWidget(self._receive_destination_e, 0, 1, 1, -1)
self._receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self._receive_message_e, 1, 1, 1, -1)
self._receive_message_e.textChanged.connect(self._update_receive_qr)
self._receive_amount_e = BTCAmountEdit()
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self._receive_amount_e, 2, 1)
self._receive_amount_e.textChanged.connect(self._update_receive_qr)
self._fiat_receive_e = AmountEdit(app_state.fx.get_currency if app_state.fx else '')
if not app_state.fx or not app_state.fx.is_enabled():
self._fiat_receive_e.setVisible(False)
grid.addWidget(self._fiat_receive_e, 2, 2, Qt.AlignLeft)
self._main_window.connect_fields(self._receive_amount_e, self._fiat_receive_e)
self._expires_combo = QComboBox()
self._expires_combo.addItems([i[0] for i in expiration_values])
self._expires_combo.setCurrentIndex(3)
self._expires_combo.setFixedWidth(self._receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them '
'a signed payment request.'),
_('Expired requests have to be deleted manually from your list, '
'in order to free the corresponding Bitcoin SV addresses.'),
_('The Bitcoin SV address never expires and will always be part '
'of this ElectrumSV wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self._expires_combo, 3, 1)
self._expires_label = QLineEdit('')
self._expires_label.setReadOnly(1)
self._expires_label.setFocusPolicy(Qt.NoFocus)
self._expires_label.hide()
grid.addWidget(self._expires_label, 3, 1)
self._save_request_button = EnterButton(_('Save request'), self._save_form_as_request)
self._new_request_button = EnterButton(_('New'), self._new_payment_request)
self._receive_qr = QRCodeWidget(fixedSize=200)
self._receive_qr.link_to_window(self._toggle_qr_window)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self._save_request_button)
buttons.addWidget(self._new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self._receive_qr)
return hbox
def create_request_list_container(self) -> QGroupBox:
layout = QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(6, 0, 6, 6)
layout.addLayout(self._request_list_toolbar_layout)
layout.addWidget(self._request_list)
request_box = QGroupBox()
request_box.setTitle(_('Requests'))
request_box.setAlignment(Qt.AlignCenter)
request_box.setContentsMargins(0, 0, 0, 0)
request_box.setLayout(layout)
return request_box
def update_widgets(self) -> None:
self._request_list.update()
def update_destination(self) -> None:
text = ""
if self._receive_key_id is not None:
script_template = self._account.get_script_template_for_id(self._receive_key_id)
if script_template is not None:
text = script_template_to_string(script_template)
self._receive_destination_e.setText(text)
def update_contents(self) -> None:
self._expires_label.hide()
self._expires_combo.show()
if self._account.is_deterministic():
fresh_key = self._account.get_fresh_keys(RECEIVING_SUBPATH, 1)[0]
self.set_receive_key(fresh_key)
def update_for_fx_quotes(self) -> None:
if self._account_id is not None:
edit = (self._fiat_receive_e
if self._fiat_receive_e.is_last_edited else self._receive_amount_e)
edit.textEdited.emit(edit.text())
# Bound to text fields in `_create_receive_form_layout`.
def _update_receive_qr(self) -> None:
if self._receive_key_id is None:
return
amount = self._receive_amount_e.get_amount()
message = self._receive_message_e.text()
self._save_request_button.setEnabled((amount is not None) or (message != ""))
script_template = self._account.get_script_template_for_id(self._receive_key_id)
address_text = script_template_to_string(script_template)
uri = web.create_URI(address_text, amount, message)
self._receive_qr.setData(uri)
if self._qr_window and self._qr_window.isVisible():
self._qr_window.set_content(self._receive_destination_e.text(), amount,
message, uri)
def _toggle_qr_window(self, event: QEvent) -> None:
if self._receive_key_id is None:
self.show_message(_("No available receiving destination."))
return
if not self._qr_window:
self._qr_window = QR_Window(self)
self._qr_window.setVisible(True)
self._qr_window_geometry = self._qr_window.geometry()
else:
if not self._qr_window.isVisible():
self._qr_window.setVisible(True)
self._qr_window.setGeometry(self._qr_window_geometry)
else:
self._qr_window_geometry = self._qr_window.geometry()
self._qr_window.setVisible(False)
self._update_receive_qr()
def set_fiat_ccy_enabled(self, flag: bool) -> None:
self._fiat_receive_e.setVisible(flag)
def get_bsv_edits(self) -> List[BTCAmountEdit]:
return [ self._receive_amount_e ]
def _save_form_as_request(self) -> None:
if not self._receive_key_id:
self._main_window.show_error(_('No receiving payment destination'))
return
amount = self._receive_amount_e.get_amount()
message = self._receive_message_e.text()
if not message and not amount:
self._main_window.show_error(_('No message or amount'))
return
def callback(exc_value: Optional[Exception]=None) -> None:
if exc_value is not None:
raise exc_value # pylint: disable=raising-bad-type
self._request_list.update_signal.emit()
i = self._expires_combo.currentIndex()
expiration = [x[1] for x in expiration_values][i]
row = self._account.requests.get_request_for_key_id(self._receive_key_id)
if row is None:
row = self._account.requests.create_request(self._receive_key_id,
PaymentFlag.UNPAID, amount, expiration, message, callback)
else:
# Expiration is just a label, so we don't use the value.
self._account.requests.update_request(row.paymentrequest_id, row.state, amount,
row.expiration, message, callback)
self._save_request_button.setEnabled(False)
def _new_payment_request(self) -> None:
keyinstances: List[KeyInstanceRow] = []
if self._account.is_deterministic():
keyinstances = self._account.get_fresh_keys(RECEIVING_SUBPATH, 1)
if not len(keyinstances):
if not self._account.is_deterministic():
msg = [
_('No more payment destinations in your wallet.'),
_('You are using a non-deterministic account, which '
'cannot create new payment destinations.'),
_('If you want to create new payment destinations, '
'use a deterministic account instead.')
]
self._main_window.show_message(' '.join(msg))
return
self._main_window.show_message(
_('Your wallet is broken and could not allocate a new payment destination.'))
self.update_contents()
self._new_request_button.setEnabled(False)
self._receive_message_e.setFocus(1)
def get_receive_key_id(self) -> Optional[int]:
return self._receive_key_id
# Only called from key list menu.
def receive_at_id(self, key_id: int) -> None:
self._receive_key_id = key_id
self._new_request_button.setEnabled(True)
self.update_destination()
self._main_window.show_receive_tab()
def set_receive_key_id(self, key_id: int) -> None:
self._receive_key_id = key_id
def set_receive_key(self, keyinstance: KeyInstanceRow) -> None:
self._receive_key_id = keyinstance.keyinstance_id
self._receive_message_e.setText("")
self._receive_amount_e.setAmount(None)
self.update_destination()
def set_form_contents(self, address_text: str, value: int, description: Optional[str]=None,
expires_description: str="") -> None:
self._receive_destination_e.setText(address_text)
self._receive_message_e.setText(description or "")
self._receive_amount_e.setAmount(value)
self._expires_combo.hide()
self._expires_label.show()
self._expires_label.setText(expires_description)
self._new_request_button.setEnabled(True)
def set_new_button_enabled(self, flag: bool) -> None:
self._new_request_button.setEnabled(flag)
def _filter_request_list(self, text: str) -> None:
self._request_list.filter(text)
| 41.928105
| 98
| 0.676695
| 1,584
| 12,830
| 5.128157
| 0.183712
| 0.063646
| 0.025114
| 0.025606
| 0.236366
| 0.14342
| 0.117075
| 0.093808
| 0.054167
| 0.042103
| 0
| 0.006231
| 0.237023
| 12,830
| 305
| 99
| 42.065574
| 0.823577
| 0.028995
| 0
| 0.122449
| 0
| 0
| 0.078394
| 0.00257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089796
| false
| 0
| 0.077551
| 0.008163
| 0.212245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ab9d713b15cf7e2722180a91c20d945c012ee0e
| 514
|
py
|
Python
|
test/crossrunner/compat.py
|
BluechipSystems/thrift
|
c595aa18cba0032e074f9585aa2d6ca548f07197
|
[
"Apache-2.0"
] | null | null | null |
test/crossrunner/compat.py
|
BluechipSystems/thrift
|
c595aa18cba0032e074f9585aa2d6ca548f07197
|
[
"Apache-2.0"
] | null | null | null |
test/crossrunner/compat.py
|
BluechipSystems/thrift
|
c595aa18cba0032e074f9585aa2d6ca548f07197
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
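# Python 2/3 compatibility helpers: on Python 2, paths are byte strings, so
# joining is done by decoding with the filesystem encoding, joining, and
# re-encoding; on Python 3 the standard library functions are used directly.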
if sys.version_info[0] == 2:
_ENCODE = sys.getfilesystemencoding()
def path_join(*args):
bin_args = map(lambda a: a.decode(_ENCODE), args)
return os.path.join(*bin_args).encode(_ENCODE)
def str_join(s, l):
bin_args = map(lambda a: a.decode(_ENCODE), l)
b = s.decode(_ENCODE)
return b.join(bin_args).encode(_ENCODE)
logfile_open = open
else:
path_join = os.path.join
str_join = str.join
def logfile_open(*args):
return open(*args, errors='replace')
| 20.56
| 53
| 0.678988
| 81
| 514
| 4.098765
| 0.358025
| 0.096386
| 0.060241
| 0.096386
| 0.319277
| 0.180723
| 0.180723
| 0.180723
| 0
| 0
| 0
| 0.004773
| 0.184825
| 514
| 24
| 54
| 21.416667
| 0.78759
| 0
| 0
| 0
| 0
| 0
| 0.013619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.117647
| 0.058824
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aba555a9c95d6e5cd6afe857fa51108b432e61a
| 1,518
|
py
|
Python
|
test/test_vom.py
|
usamaahmadkhan/vpp
|
cece3e682f6dba68ba86b66b295f99a33496d9ee
|
[
"Apache-2.0"
] | null | null | null |
test/test_vom.py
|
usamaahmadkhan/vpp
|
cece3e682f6dba68ba86b66b295f99a33496d9ee
|
[
"Apache-2.0"
] | null | null | null |
test/test_vom.py
|
usamaahmadkhan/vpp
|
cece3e682f6dba68ba86b66b295f99a33496d9ee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
""" VAPI test """
import unittest
import os
import signal
from framework import VppTestCase, running_extended_tests, \
VppTestRunner, Worker
@unittest.skipUnless(running_extended_tests(), "part of extended tests")
class VOMTestCase(VppTestCase):
""" VPP Object Model Test """
def test_vom_cpp(self):
""" run C++ VOM tests """
var = "TEST_DIR"
built_root = os.getenv(var, None)
self.assertIsNotNone(built_root,
"Environment variable `%s' not set" % var)
executable = "%s/build/vom_test/vom_test" % built_root
worker = Worker(
[executable, "vpp object model", self.shm_prefix], self.logger)
worker.start()
timeout = 120
worker.join(timeout)
self.logger.info("Worker result is `%s'" % worker.result)
error = False
if worker.result is None:
try:
error = True
self.logger.error(
"Timeout! Worker did not finish in %ss" % timeout)
os.killpg(os.getpgid(worker.process.pid), signal.SIGTERM)
worker.join()
except:
raise Exception("Couldn't kill worker-spawned process")
if error:
raise Exception(
"Timeout! Worker did not finish in %ss" % timeout)
self.assert_equal(worker.result, 0, "Binary test return code")
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 33
| 75
| 0.590909
| 171
| 1,518
| 5.116959
| 0.502924
| 0.054857
| 0.045714
| 0.043429
| 0.082286
| 0.082286
| 0.082286
| 0.082286
| 0
| 0
| 0
| 0.003788
| 0.304348
| 1,518
| 45
| 76
| 33.733333
| 0.824811
| 0.04809
| 0
| 0.057143
| 0
| 0
| 0.187237
| 0.018233
| 0
| 0
| 0
| 0
| 0.057143
| 1
| 0.028571
| false
| 0
| 0.114286
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6abaa4631fe046cd2892f35a91bca62bc7f0f887
| 3,096
|
py
|
Python
|
locations/spiders/tesco.py
|
bealbrown/allhours
|
f750ee7644246a97bd16879f14115d7845f76b89
|
[
"MIT"
] | null | null | null |
locations/spiders/tesco.py
|
bealbrown/allhours
|
f750ee7644246a97bd16879f14115d7845f76b89
|
[
"MIT"
] | null | null | null |
locations/spiders/tesco.py
|
bealbrown/allhours
|
f750ee7644246a97bd16879f14115d7845f76b89
|
[
"MIT"
] | null | null | null |
import json
import re
import scrapy
from locations.hourstudy import inputoutput
DAYS = {
'mo': 'Mo',
'tu': 'Tu',
'we': 'We',
'fr': 'Fr',
'th': 'Th',
'sa': 'Sa',
'su': 'Su',
}
class TescoSpider(scrapy.Spider):
name = "tesco"
allowed_domains = ["tescolocation.api.tesco.com"]
def store_hours(self, store_hours):
clean_time=''
for key, value in store_hours.items():
if('isOpen' in value and 'open' in value and 'close' in value):
if(value['isOpen']=='true'):
clean_time = clean_time + DAYS[key]+' '+value['open'][0:2]+':'+value['open'][2:]+'-'+value['close'][0:2]+':'+value['close'][2:]+';'
else:
clean_time = clean_time + DAYS[key]+' '+'Closed'+';'
return clean_time
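# Example of the resulting string (hypothetical hours):
# 'Mo 08:00-22:00;Tu 08:00-22:00;Su Closed;'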
def start_requests(self):
url = 'https://tescolocation.api.tesco.com/v3/locations/search?offset=0&limit=1000000&sort=near:%2251.499207299999995,-0.08800609999999999%22&filter=category:Store%20AND%20isoCountryCode:x-uk&fields=name,geo,openingHours,altIds.branchNumber,contact'
headers = {
'Accept-Language': 'en-US,en;q=0.9',
'Origin': 'https://www.tesco.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': 'https://www.kfc.com/store-locator?query=90210',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'x-appkey':'store-locator-web-cde'
}
yield scrapy.http.FormRequest(
url=url, method='GET',
headers=headers, callback=self.parse
)
def parse(self, response):
data = json.loads(response.body_as_unicode())
stores = data['results']
for store in stores:
addr_full=''
for add in store['location']['contact']['address']['lines']:
addr_full=addr_full+' '+add['text']
properties = {
'ref': store['location']['id'],
'name': store['location']['name'],
'addr_full': addr_full,
'city': store['location']['contact']['address']['town'],
'state': '',
'country':'United Kingdom',
'postcode': store['location']['contact']['address']['postcode'],
'lat': store['location']['geo']['coordinates']['latitude'],
'lon': store['location']['geo']['coordinates']['longitude'],
'phone': store['location']['contact']['phoneNumbers'][0]['number'],
}
opening_hours = self.store_hours(store['location']['openingHours'][0]['standardOpeningHours'])
if opening_hours:
properties['opening_hours'] = opening_hours
raw = store['location']['openingHours'][0]['standardOpeningHours']
formatted = opening_hours
yield inputoutput(raw,formatted)
# yield inputoutput(**properties)
| 38.222222
| 257
| 0.549096
| 323
| 3,096
| 5.188854
| 0.473684
| 0.077566
| 0.047733
| 0.048329
| 0.084726
| 0.029833
| 0
| 0
| 0
| 0
| 0
| 0.031929
| 0.271641
| 3,096
| 80
| 258
| 38.7
| 0.711308
| 0.010013
| 0
| 0
| 0
| 0.015152
| 0.33998
| 0.026806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.060606
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6abb4495b3d52a4655573442ecead7d8db0e2301
| 12,883
|
py
|
Python
|
astropy/table/serialize.py
|
tacaswell/astropy
|
75046e61916da36dffe87ddf59a7c6bfb00de81c
|
[
"BSD-3-Clause"
] | 1
|
2019-10-05T18:20:27.000Z
|
2019-10-05T18:20:27.000Z
|
astropy/table/serialize.py
|
tacaswell/astropy
|
75046e61916da36dffe87ddf59a7c6bfb00de81c
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/table/serialize.py
|
tacaswell/astropy
|
75046e61916da36dffe87ddf59a7c6bfb00de81c
|
[
"BSD-3-Clause"
] | null | null | null |
from importlib import import_module
import re
from copy import deepcopy
from collections import OrderedDict
from astropy.utils.data_info import MixinInfo
from .column import Column
from .table import Table, QTable, has_info_class
from astropy.units.quantity import QuantityInfo
__construct_mixin_classes = ('astropy.time.core.Time',
'astropy.time.core.TimeDelta',
'astropy.units.quantity.Quantity',
'astropy.coordinates.angles.Latitude',
'astropy.coordinates.angles.Longitude',
'astropy.coordinates.angles.Angle',
'astropy.coordinates.distances.Distance',
'astropy.coordinates.earth.EarthLocation',
'astropy.coordinates.sky_coordinate.SkyCoord',
'astropy.table.table.NdarrayMixin',
'astropy.table.column.MaskedColumn')
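# Allow-list of fully qualified class names that may be reconstructed from a
# serialized representation; _construct_mixin_from_obj_attrs_and_info below
# refuses any class not listed here to avoid importing untrusted code.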
class SerializedColumn(dict):
"""
Subclass of dict that is a used in the representation to contain the name
(and possible other info) for a mixin attribute (either primary data or an
array-like attribute) that is serialized as a column in the table.
Normally contains the single key ``name`` with the name of the column in the
table.
"""
pass
def _represent_mixin_as_column(col, name, new_cols, mixin_cols,
exclude_classes=()):
"""Carry out processing needed to serialize ``col`` in an output table
consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This
relies on the object determine if any transformation is required and may
depend on the ``serialize_method`` and ``serialize_context`` context
variables. For instance a ``MaskedColumn`` may be stored directly to
FITS, but can also be serialized as separate data and mask columns.
This function builds up a list of plain columns in the ``new_cols`` arg (which
is passed as a persistent list). This includes both plain columns from the
original table and plain columns that represent data from serialized columns
(e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column).
For serialized columns the ``mixin_cols`` dict is updated with required
attributes and information to subsequently reconstruct the table.
Table mixin columns are always serialized and get represented by one
or more data columns. In earlier versions of the code *only* mixin
columns were serialized, hence the use within this code of "mixin"
to imply serialization. Starting with version 3.1, the non-mixin
``MaskedColumn`` can also be serialized.
"""
obj_attrs = col.info._represent_as_dict()
ordered_keys = col.info._represent_as_dict_attrs
# If serialization is not required (see function docstring above)
# or explicitly specified as excluded, then treat as a normal column.
if not obj_attrs or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial, xform in (('unit', lambda x: x is not None and x != '', str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = xform(col_attr) if xform else col_attr
data_attrs = [key for key in ordered_keys if key in obj_attrs and
getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
# New column name combines the old name and attribute
# (e.g. skycoord.ra, skycoord.dec), unless it is the primary data
# attribute for the column (e.g. value for Quantity or data
# for MaskedColumn)
if data_attr == col.info._represent_as_dict_primary_data:
new_name = name
else:
new_name = name + '.' + data_attr
if not has_info_class(data, MixinInfo):
new_cols.append(Column(data, name=new_name, **info))
obj_attrs[data_attr] = SerializedColumn({'name': new_name})
else:
# recurse. This will define obj_attrs[new_name].
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name))
# Strip out from info any attributes defined by the parent
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs['__info__'] = info
# Store the fully qualified class name
obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__
mixin_cols[name] = obj_attrs
def represent_mixins_as_columns(tbl, exclude_classes=()):
"""Represent input Table ``tbl`` using only `~astropy.table.Column`
or `~astropy.table.MaskedColumn` objects.
This function represents any mixin columns like `~astropy.time.Time` in
``tbl`` to one or more plain ``~astropy.table.Column`` objects and returns
a new Table. A single mixin column may be split into multiple column
components as needed for fully representing the column. This includes the
possibility of recursive splitting, as shown in the example below. The
new column names are formed as ``<column_name>.<component>``, e.g.
``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``.
In addition to splitting columns, this function updates the table ``meta``
dictionary to include a dict named ``__serialized_columns__`` which provides
additional information needed to construct the original mixin columns from
the split columns.
This function is used by astropy I/O when writing tables to ECSV, FITS,
HDF5 formats.
Note that if the table does not include any mixin columns then the original
table is returned with no update to ``meta``.
Parameters
----------
tbl : `~astropy.table.Table` or subclass
Table to represent mixins as Columns
exclude_classes : tuple of classes
Exclude any mixin columns which are instances of any classes in the tuple
Returns
-------
tbl : `~astropy.table.Table`
New Table with updated columns, or else the original input ``tbl``
Examples
--------
>>> from astropy.table import Table, represent_mixins_as_columns
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord
>>> x = [100.0, 200.0]
>>> obstime = Time([1999.0, 2000.0], format='jyear')
>>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime)
>>> tbl = Table([sc, x], names=['sc', 'x'])
>>> represent_mixins_as_columns(tbl)
<Table length=2>
sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x
deg deg
float64 float64 float64 float64 float64
------- ------- -------------- -------------- -------
1.0 3.0 2451180.0 -0.25 100.0
2.0 4.0 2451545.0 0.0 200.0
"""
# Dict of metadata for serializing each column, keyed by column name.
# Gets filled in place by _represent_mixin_as_column().
mixin_cols = {}
# List of columns for the output table. For plain Column objects
# this will just be the original column object.
new_cols = []
# Go through table columns and represent each column as one or more
# plain Column objects (in new_cols) + metadata (in mixin_cols).
for col in tbl.itercols():
_represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols,
exclude_classes=exclude_classes)
# If no metadata was created then just return the original table.
if not mixin_cols:
return tbl
meta = deepcopy(tbl.meta)
meta['__serialized_columns__'] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
cls_full_name = obj_attrs.pop('__class__')
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
if cls_full_name not in __construct_mixin_classes:
raise ValueError('unsupported class for construct {}'.format(cls_full_name))
mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
class _TableLite(OrderedDict):
"""
Minimal table-like object for _construct_mixin_from_columns. This allows
manipulating the object like a Table but without the actual overhead
for a full Table.
More pressing, there is an issue with constructing MaskedColumn, where the
encoded Column components (data, mask) are turned into a MaskedColumn.
When this happens in a real table then all other columns are immediately
Masked and a warning is issued. This is not desirable.
"""
def add_column(self, col, index=0):
colnames = self.colnames
self[col.info.name] = col
for ii, name in enumerate(colnames):
if ii >= index:
self.move_to_end(name)
@property
def colnames(self):
return list(self.keys())
def itercols(self):
return self.values()
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
if 'name' in val:
data_attrs_map[val['name']] = name
else:
_construct_mixin_from_columns(name, val, out)
data_attrs_map[name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# Get the index where to add new column
idx = min(out.colnames.index(name) for name in data_attrs_map)
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name, data_attr in data_attrs_map.items():
col = out[name]
obj_attrs[data_attr] = col
del out[name]
info = obj_attrs.pop('__info__', {})
if len(data_attrs_map) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column.
for attr, nontrivial in (('unit', lambda x: x not in (None, '')),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info['name'] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if '__serialized_columns__' not in tbl.meta:
return tbl
meta = tbl.meta.copy()
mixin_cols = meta.pop('__serialized_columns__')
out = _TableLite(tbl.columns)
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# If no quantity subclasses are in the output then output as Table.
# For instance ascii.read(file, format='ecsv') doesn't specify an
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo)
for col in out.itercols())
out_cls = QTable if has_quantities else Table
return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
| 41.028662
| 87
| 0.645579
| 1,735
| 12,883
| 4.637464
| 0.214409
| 0.02784
| 0.007954
| 0.006214
| 0.111733
| 0.072458
| 0.070346
| 0.049963
| 0.0348
| 0.024111
| 0
| 0.00855
| 0.264612
| 12,883
| 313
| 88
| 41.159744
| 0.840722
| 0.468292
| 0
| 0.080882
| 0
| 0
| 0.090362
| 0.066924
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.007353
| 0.066176
| 0.014706
| 0.198529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6abe0f56148406c214bae2a7180acf428b205f37
| 1,914
|
py
|
Python
|
UVa 573 - The Snail/sample/main.py
|
tadvi/uva
|
0ac0cbdf593879b4fb02a3efc09adbb031cb47d5
|
[
"MIT"
] | 1
|
2020-11-24T03:17:21.000Z
|
2020-11-24T03:17:21.000Z
|
UVa 573 - The Snail/sample/main.py
|
tadvi/uva
|
0ac0cbdf593879b4fb02a3efc09adbb031cb47d5
|
[
"MIT"
] | null | null | null |
UVa 573 - The Snail/sample/main.py
|
tadvi/uva
|
0ac0cbdf593879b4fb02a3efc09adbb031cb47d5
|
[
"MIT"
] | 1
|
2021-04-11T16:22:31.000Z
|
2021-04-11T16:22:31.000Z
|
'''
Created on Jun 18, 2013
@author: Yubin Bai
All rights reserved.
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
infinity = 1 << 30
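# solve() simulates the snail: each day it climbs U metres reduced by a
# cumulative fatigue of F percent of U per elapsed day, then slides back D
# metres at night; it succeeds when the height exceeds H during the day and
# fails when the height drops below zero after sliding.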
def solve(par):
H, U, D, F = par
day = 0
amountRise = U
currH = 0
while True:
amountRise = U * (1 - 0.01 * F * day)
currH += amountRise
if currH > H:
return 'success on day %d' % (day + 1)
currH -= D
if currH < 0:
return 'failure on day %d' % (day + 1)
day += 1
class Solver:
def getInput(self):
self.input = []
self.numOfTests = 0
while True:
H, U, D, F = map(int, self.fIn.readline().strip().split())
if H == 0:
break
self.numOfTests += 1
self.input.append((H, U, D, F))
def __init__(self):
self.fIn = open('input.txt')
self.fOut = open('output.txt', 'w')
self.results = []
def parallel(self):
self.getInput()
p = Pool(4)
millis1 = int(round(time.time() * 1000))
self.results = p.map(solve, self.input)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def sequential(self):
self.getInput()
millis1 = int(round(time.time() * 1000))
for i in self.input:
self.results.append(solve(i))
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def makeOutput(self):
for test in range(self.numOfTests):
self.fOut.write("Case #%d: %s\n" % (test + 1, self.results[test]))
self.fIn.close()
self.fOut.close()
if __name__ == '__main__':
solver = Solver()
if parallelSolve:
solver.parallel()
else:
solver.sequential()
| 24.538462
| 78
| 0.527168
| 236
| 1,914
| 4.224576
| 0.355932
| 0.032096
| 0.048144
| 0.064193
| 0.238716
| 0.218656
| 0.164493
| 0.164493
| 0.164493
| 0.164493
| 0
| 0.037618
| 0.333333
| 1,914
| 77
| 79
| 24.857143
| 0.74373
| 0.03396
| 0
| 0.20339
| 0
| 0
| 0.068441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0
| 0.033898
| 0
| 0.186441
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6abe9ac6695fe5a1d34b503ad56c8e41374a9ea6
| 5,074
|
py
|
Python
|
scibert/models/text_classifier.py
|
tomhoper/scibert
|
3cc65f433808f7879c973dc4fc41bd25e465dc15
|
[
"Apache-2.0"
] | 1,143
|
2019-03-27T01:49:11.000Z
|
2022-03-24T10:43:47.000Z
|
scibert/models/text_classifier.py
|
tomhoper/scibert
|
3cc65f433808f7879c973dc4fc41bd25e465dc15
|
[
"Apache-2.0"
] | 91
|
2019-03-27T17:20:27.000Z
|
2022-03-29T09:29:58.000Z
|
scibert/models/text_classifier.py
|
tomhoper/scibert
|
3cc65f433808f7879c973dc4fc41bd25e465dc15
|
[
"Apache-2.0"
] | 206
|
2019-03-28T02:22:30.000Z
|
2022-03-30T07:07:05.000Z
|
from typing import Dict, Optional, List, Any
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
@Model.register("text_classifier")
class TextClassifier(Model):
"""
Implements a basic text classifier:
1) Embed tokens using `text_field_embedder`
2) Seq2SeqEncoder, e.g. BiLSTM
3) Append the first and last encoder states
4) Final feedforward layer
Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
text_encoder: Seq2SeqEncoder,
classifier_feedforward: FeedForward,
verbose_metrics: bool,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(TextClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.text_encoder = text_encoder
self.classifier_feedforward = classifier_feedforward
self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim() , self.num_classes)
self.label_accuracy = CategoricalAccuracy()
self.label_f1_metrics = {}
self.verbose_metrics = verbose_metrics
for i in range(self.num_classes):
self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
self.loss = torch.nn.CrossEntropyLoss()
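# Pooling below uses AllenNLP's get_final_encoder_states with
# bidirectional=True, which concatenates the forward state at the last
# timestep with the backward state at the first timestep for each instance.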
self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)
initializer(self)
@overrides
def forward(self,
text: Dict[str, torch.LongTensor],
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
text : Dict[str, torch.LongTensor]
From a ``TextField``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text = self.text_field_embedder(text)
mask = util.get_text_field_mask(text)
encoded_text = self.text_encoder(embedded_text, mask)
pooled = self.pool(encoded_text, mask)
ff_hidden = self.classifier_feedforward(pooled)
logits = self.prediction_layer(ff_hidden)
class_probs = F.softmax(logits, dim=1)
output_dict = {"logits": logits}
if label is not None:
loss = self.loss(logits, label)
output_dict["loss"] = loss
# compute F1 per label
for i in range(self.num_classes):
metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")]
metric(class_probs, label)
self.label_accuracy(logits, label)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
class_probabilities = F.softmax(output_dict['logits'], dim=-1)
output_dict['class_probs'] = class_probabilities
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metric_dict = {}
sum_f1 = 0.0
for name, metric in self.label_f1_metrics.items():
metric_val = metric.get_metric(reset)
if self.verbose_metrics:
metric_dict[name + '_P'] = metric_val[0]
metric_dict[name + '_R'] = metric_val[1]
metric_dict[name + '_F1'] = metric_val[2]
sum_f1 += metric_val[2]
names = list(self.label_f1_metrics.keys())
total_len = len(names)
average_f1 = sum_f1 / total_len
metric_dict['average_F1'] = average_f1
metric_dict['accuracy'] = self.label_accuracy.get_metric(reset)
return metric_dict
| 40.919355
| 120
| 0.653331
| 579
| 5,074
| 5.518135
| 0.278066
| 0.022535
| 0.026604
| 0.028169
| 0.162128
| 0.080125
| 0.080125
| 0.064476
| 0.064476
| 0.037559
| 0
| 0.008225
| 0.257194
| 5,074
| 123
| 121
| 41.252033
| 0.83948
| 0.204769
| 0
| 0.078947
| 0
| 0
| 0.022135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.131579
| 0
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6abec40d890d6b8f05f738693cce2c79127a8924
| 4,716
|
py
|
Python
|
plugins/template/tasks.py
|
crotwell/cmd2
|
5ce3a64e41258b6a694ad45bb1c604be53a1e974
|
[
"MIT"
] | 469
|
2016-02-16T16:18:48.000Z
|
2022-03-31T15:24:40.000Z
|
plugins/template/tasks.py
|
crotwell/cmd2
|
5ce3a64e41258b6a694ad45bb1c604be53a1e974
|
[
"MIT"
] | 1,076
|
2016-02-19T02:50:47.000Z
|
2022-03-22T03:08:06.000Z
|
plugins/template/tasks.py
|
crotwell/cmd2
|
5ce3a64e41258b6a694ad45bb1c604be53a1e974
|
[
"MIT"
] | 138
|
2016-02-19T02:46:23.000Z
|
2022-03-30T13:13:01.000Z
|
#
# -*- coding: utf-8 -*-
"""Development related tasks to be run with 'invoke'"""
import os
import pathlib
import shutil
import invoke
TASK_ROOT = pathlib.Path(__file__).resolve().parent
TASK_ROOT_STR = str(TASK_ROOT)
# shared function
def rmrf(items, verbose=True):
"""Silently remove a list of directories or files"""
if isinstance(items, str):
items = [items]
for item in items:
if verbose:
print("Removing {}".format(item))
shutil.rmtree(item, ignore_errors=True)
# rmtree doesn't remove bare files
try:
os.remove(item)
except FileNotFoundError:
pass
# create namespaces
namespace = invoke.Collection()
namespace_clean = invoke.Collection('clean')
namespace.add_collection(namespace_clean, 'clean')
#####
#
# pytest, pylint, and codecov
#
#####
@invoke.task
def pytest(context, junit=False, pty=True, append_cov=False):
"""Run tests and code coverage using pytest"""
ROOT_PATH = TASK_ROOT.parent.parent
with context.cd(str(ROOT_PATH)):
command_str = 'pytest --cov=cmd2_myplugin --cov-report=term --cov-report=html'
if append_cov:
command_str += ' --cov-append'
if junit:
command_str += ' --junitxml=junit/test-results.xml'
command_str += ' ' + str((TASK_ROOT / 'tests').relative_to(ROOT_PATH))
context.run(command_str, pty=pty)
namespace.add_task(pytest)
@invoke.task
def pytest_clean(context):
"""Remove pytest cache and code coverage files and directories"""
# pylint: disable=unused-argument
with context.cd(TASK_ROOT_STR):
dirs = ['.pytest_cache', '.cache', '.coverage']
rmrf(dirs)
namespace_clean.add_task(pytest_clean, 'pytest')
@invoke.task
def pylint(context):
"""Check code quality using pylint"""
context.run('pylint --rcfile=cmd2_myplugin/pylintrc cmd2_myplugin')
namespace.add_task(pylint)
@invoke.task
def pylint_tests(context):
"""Check code quality of test suite using pylint"""
context.run('pylint --rcfile=tests/pylintrc tests')
namespace.add_task(pylint_tests)
#####
#
# build and distribute
#
#####
BUILDDIR = 'build'
DISTDIR = 'dist'
@invoke.task
def build_clean(context):
"""Remove the build directory"""
# pylint: disable=unused-argument
rmrf(BUILDDIR)
namespace_clean.add_task(build_clean, 'build')
@invoke.task
def dist_clean(context):
"""Remove the dist directory"""
# pylint: disable=unused-argument
rmrf(DISTDIR)
namespace_clean.add_task(dist_clean, 'dist')
@invoke.task
def eggs_clean(context):
"""Remove egg directories"""
# pylint: disable=unused-argument
dirs = set()
dirs.add('.eggs')
for name in os.listdir(os.curdir):
if name.endswith('.egg-info'):
dirs.add(name)
if name.endswith('.egg'):
dirs.add(name)
rmrf(dirs)
namespace_clean.add_task(eggs_clean, 'eggs')
@invoke.task
def bytecode_clean(context):
"""Remove __pycache__ directories and *.pyc files"""
# pylint: disable=unused-argument
dirs = set()
for root, dirnames, files in os.walk(os.curdir):
if '__pycache__' in dirnames:
dirs.add(os.path.join(root, '__pycache__'))
for file in files:
if file.endswith(".pyc"):
dirs.add(os.path.join(root, file))
print("Removing __pycache__ directories and .pyc files")
rmrf(dirs, verbose=False)
namespace_clean.add_task(bytecode_clean, 'bytecode')
#
# make a dummy clean task which runs all the tasks in the clean namespace
clean_tasks = list(namespace_clean.tasks.values())
@invoke.task(pre=list(namespace_clean.tasks.values()), default=True)
def clean_all(context):
"""Run all clean tasks"""
# pylint: disable=unused-argument
pass
namespace_clean.add_task(clean_all, 'all')
@invoke.task(pre=[clean_all])
def sdist(context):
"""Create a source distribution"""
context.run('python setup.py sdist')
namespace.add_task(sdist)
@invoke.task(pre=[clean_all])
def wheel(context):
"""Build a wheel distribution"""
context.run('python setup.py bdist_wheel')
namespace.add_task(wheel)
#
# these two tasks are commented out so you don't
# accidentally run them and upload this template to pypi
#
# @invoke.task(pre=[sdist, wheel])
# def pypi(context):
# """Build and upload a distribution to pypi"""
# context.run('twine upload dist/*')
# namespace.add_task(pypi)
# @invoke.task(pre=[sdist, wheel])
# def pypi_test(context):
# """Build and upload a distribution to https://test.pypi.org"""
# context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*')
# namespace.add_task(pypi_test)
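#
# A minimal sketch of how these tasks might be invoked from the command line
# (flag names follow the task signatures above; the pypi tasks remain disabled):
#   invoke clean            # run every task registered in the clean namespace
#   invoke pytest --junit   # run the tests and write junit/test-results.xml
#   invoke sdist wheel      # clean, then build source and wheel distributions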
| 23.231527
| 87
| 0.671968
| 617
| 4,716
| 4.993517
| 0.249595
| 0.04544
| 0.033755
| 0.05258
| 0.253489
| 0.185654
| 0.04544
| 0.022071
| 0
| 0
| 0
| 0.00105
| 0.192112
| 4,716
| 202
| 88
| 23.346535
| 0.807612
| 0.293681
| 0
| 0.195652
| 0
| 0
| 0.133831
| 0.027078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0.021739
| 0.043478
| 0
| 0.173913
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6abf04d8aaa93e623f487cf9322ec9b114c31f92
| 2,590
|
py
|
Python
|
homeassistant/components/epsonworkforce/sensor.py
|
maexono/home-assistant
|
c174b83f5408124fc7834e8282969a1e8f9cca16
|
[
"Apache-2.0"
] | 2
|
2019-12-30T14:12:33.000Z
|
2021-07-05T10:33:08.000Z
|
homeassistant/components/epsonworkforce/sensor.py
|
maexono/home-assistant
|
c174b83f5408124fc7834e8282969a1e8f9cca16
|
[
"Apache-2.0"
] | 2
|
2022-01-13T04:00:03.000Z
|
2022-03-12T01:02:40.000Z
|
homeassistant/components/epsonworkforce/sensor.py
|
maexono/home-assistant
|
c174b83f5408124fc7834e8282969a1e8f9cca16
|
[
"Apache-2.0"
] | 3
|
2019-04-28T16:35:45.000Z
|
2020-05-28T15:21:59.000Z
|
"""Support for Epson Workforce Printer."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_MONITORED_CONDITIONS
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['epsonprinter==0.0.8']
_LOGGER = logging.getLogger(__name__)
MONITORED_CONDITIONS = {
'black': ['Inklevel Black', '%', 'mdi:water'],
'magenta': ['Inklevel Magenta', '%', 'mdi:water'],
'cyan': ['Inklevel Cyan', '%', 'mdi:water'],
'yellow': ['Inklevel Yellow', '%', 'mdi:water'],
'clean': ['Inklevel Cleaning', '%', 'mdi:water'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
})
SCAN_INTERVAL = timedelta(minutes=60)
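# A minimal sketch of a configuration.yaml entry this schema accepts (the platform
# name comes from the component path; the host and condition list are illustrative):
#   sensor:
#     - platform: epsonworkforce
#       host: 192.168.1.50
#       monitored_conditions:
#         - black
#         - yellow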
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the cartridge sensor."""
host = config.get(CONF_HOST)
from epsonprinter_pkg.epsonprinterapi import EpsonPrinterAPI
api = EpsonPrinterAPI(host)
if not api.available:
raise PlatformNotReady()
sensors = [EpsonPrinterCartridge(api, condition)
for condition in config[CONF_MONITORED_CONDITIONS]]
add_devices(sensors, True)
class EpsonPrinterCartridge(Entity):
"""Representation of a cartridge sensor."""
def __init__(self, api, cartridgeidx):
"""Initialize a cartridge sensor."""
self._api = api
self._id = cartridgeidx
self._name = MONITORED_CONDITIONS[self._id][0]
self._unit = MONITORED_CONDITIONS[self._id][1]
self._icon = MONITORED_CONDITIONS[self._id][2]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
@property
def state(self):
"""Return the state of the device."""
return self._api.getSensorValue(self._id)
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._api.available
def update(self):
"""Get the latest data from the Epson printer."""
self._api.update()
| 30.116279
| 71
| 0.679151
| 302
| 2,590
| 5.655629
| 0.384106
| 0.088993
| 0.040398
| 0.043911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003872
| 0.202317
| 2,590
| 85
| 72
| 30.470588
| 0.822846
| 0.146332
| 0
| 0.089286
| 0
| 0
| 0.079167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.160714
| 0
| 0.410714
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6abf99810278b3e6bb4dfbe19a2991c6db839dec
| 19,661
|
py
|
Python
|
bot/exts/help_channels/_cog.py
|
bast0006/bot
|
dec9a9dba77aa4322f9dc37b6493a8410e7482ec
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
bot/exts/help_channels/_cog.py
|
bast0006/bot
|
dec9a9dba77aa4322f9dc37b6493a8410e7482ec
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
bot/exts/help_channels/_cog.py
|
bast0006/bot
|
dec9a9dba77aa4322f9dc37b6493a8410e7482ec
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
import asyncio
import logging
import random
import typing as t
from datetime import datetime, timezone
from operator import attrgetter
import discord
import discord.abc
from discord.ext import commands
from bot import constants
from bot.bot import Bot
from bot.exts.help_channels import _caches, _channel, _cooldown, _message, _name, _stats
from bot.utils import channel as channel_utils, lock, scheduling
log = logging.getLogger(__name__)
NAMESPACE = "help"
HELP_CHANNEL_TOPIC = """
This is a Python help channel. You can claim your own help channel in the Python Help: Available category.
"""
class HelpChannels(commands.Cog):
"""
Manage the help channel system of the guild.
The system is based on three categories:
Available Category
* Contains channels which are ready to be occupied by someone who needs help
* Will always contain `constants.HelpChannels.max_available` channels; refilled automatically
from the pool of dormant channels
* Prioritise using the channels which have been dormant for the longest amount of time
* If there are no more dormant channels, the bot will automatically create a new one
* If there are no dormant channels to move, helpers will be notified (see `notify()`)
* When a channel becomes available, the dormant embed will be edited to show `AVAILABLE_MSG`
* A user can only claim a channel once every `constants.HelpChannels.claim_minutes` minutes
* To keep track of cooldowns, the user who claimed a channel will have a temporary role
In Use Category
* Contains all channels which are occupied by someone needing help
* Channel moves to dormant category after `constants.HelpChannels.idle_minutes` of being idle
* Command can prematurely mark a channel as dormant
* Channel claimant is allowed to use the command
* Allowed roles for the command are configurable with `constants.HelpChannels.cmd_whitelist`
* When a channel becomes dormant, an embed with `DORMANT_MSG` will be sent
Dormant Category
* Contains channels which aren't in use
* Channels are used to refill the Available category
Help channels are named after the chemical elements in `bot/resources/elements.json`.
"""
def __init__(self, bot: Bot):
self.bot = bot
self.scheduler = scheduling.Scheduler(self.__class__.__name__)
# Categories
self.available_category: discord.CategoryChannel = None
self.in_use_category: discord.CategoryChannel = None
self.dormant_category: discord.CategoryChannel = None
# Queues
self.channel_queue: asyncio.Queue[discord.TextChannel] = None
self.name_queue: t.Deque[str] = None
self.last_notification: t.Optional[datetime] = None
# Asyncio stuff
self.queue_tasks: t.List[asyncio.Task] = []
self.init_task = self.bot.loop.create_task(self.init_cog())
def cog_unload(self) -> None:
"""Cancel the init task and scheduled tasks when the cog unloads."""
log.trace("Cog unload: cancelling the init_cog task")
self.init_task.cancel()
log.trace("Cog unload: cancelling the channel queue tasks")
for task in self.queue_tasks:
task.cancel()
self.scheduler.cancel_all()
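# Note on the lock_arg decorators below: they key locks on the message's channel id,
# on its author id, and (in the "help.unclaim" namespace, with wait=True) on the
# author id again, so a claim waits for any in-progress unclaim for the same user
# rather than racing it.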
@lock.lock_arg(NAMESPACE, "message", attrgetter("channel.id"))
@lock.lock_arg(NAMESPACE, "message", attrgetter("author.id"))
@lock.lock_arg(f"{NAMESPACE}.unclaim", "message", attrgetter("author.id"), wait=True)
async def claim_channel(self, message: discord.Message) -> None:
"""
Claim the channel in which the question `message` was sent.
Move the channel to the In Use category and pin the `message`. Add a cooldown to the
claimant to prevent them from asking another question. Lastly, make a new channel available.
"""
log.info(f"Channel #{message.channel} was claimed by `{message.author.id}`.")
await self.move_to_in_use(message.channel)
await _cooldown.revoke_send_permissions(message.author, self.scheduler)
await _message.pin(message)
try:
await _message.dm_on_open(message)
except Exception as e:
log.warning("Error occurred while sending DM:", exc_info=e)
# Add user with channel for dormant check.
await _caches.claimants.set(message.channel.id, message.author.id)
self.bot.stats.incr("help.claimed")
# Must use a timezone-aware datetime to ensure a correct POSIX timestamp.
timestamp = datetime.now(timezone.utc).timestamp()
await _caches.claim_times.set(message.channel.id, timestamp)
await _caches.unanswered.set(message.channel.id, True)
# Not awaited because it may indefinitely hold the lock while waiting for a channel.
scheduling.create_task(self.move_to_available(), name=f"help_claim_{message.id}")
def create_channel_queue(self) -> asyncio.Queue:
"""
Return a queue of dormant channels to use for getting the next available channel.
The channels are added to the queue in a random order.
"""
log.trace("Creating the channel queue.")
channels = list(_channel.get_category_channels(self.dormant_category))
random.shuffle(channels)
log.trace("Populating the channel queue with channels.")
queue = asyncio.Queue()
for channel in channels:
queue.put_nowait(channel)
return queue
async def create_dormant(self) -> t.Optional[discord.TextChannel]:
"""
Create and return a new channel in the Dormant category.
The new channel will sync its permission overwrites with the category.
Return None if no more channel names are available.
"""
log.trace("Getting a name for a new dormant channel.")
try:
name = self.name_queue.popleft()
except IndexError:
log.debug("No more names available for new dormant channels.")
return None
log.debug(f"Creating a new dormant channel named {name}.")
return await self.dormant_category.create_text_channel(name, topic=HELP_CHANNEL_TOPIC)
async def close_check(self, ctx: commands.Context) -> bool:
"""Return True if the channel is in use and the user is the claimant or has a whitelisted role."""
if ctx.channel.category != self.in_use_category:
log.debug(f"{ctx.author} invoked command 'close' outside an in-use help channel")
return False
if await _caches.claimants.get(ctx.channel.id) == ctx.author.id:
log.trace(f"{ctx.author} is the help channel claimant, passing the check for dormant.")
self.bot.stats.incr("help.dormant_invoke.claimant")
return True
log.trace(f"{ctx.author} is not the help channel claimant, checking roles.")
has_role = await commands.has_any_role(*constants.HelpChannels.cmd_whitelist).predicate(ctx)
if has_role:
self.bot.stats.incr("help.dormant_invoke.staff")
return has_role
@commands.command(name="close", aliases=["dormant", "solved"], enabled=False)
async def close_command(self, ctx: commands.Context) -> None:
"""
Make the current in-use help channel dormant.
May only be invoked by the channel's claimant or by staff.
"""
# Don't use a discord.py check because the check needs to fail silently.
if await self.close_check(ctx):
log.info(f"Close command invoked by {ctx.author} in #{ctx.channel}.")
await self.unclaim_channel(ctx.channel, is_auto=False)
async def get_available_candidate(self) -> discord.TextChannel:
"""
Return a dormant channel to turn into an available channel.
If no channel is available, wait indefinitely until one becomes available.
"""
log.trace("Getting an available channel candidate.")
try:
channel = self.channel_queue.get_nowait()
except asyncio.QueueEmpty:
log.info("No candidate channels in the queue; creating a new channel.")
channel = await self.create_dormant()
if not channel:
log.info("Couldn't create a candidate channel; waiting to get one from the queue.")
notify_channel = self.bot.get_channel(constants.HelpChannels.notify_channel)
last_notification = await _message.notify(notify_channel, self.last_notification)
if last_notification:
self.last_notification = last_notification
self.bot.stats.incr("help.out_of_channel_alerts")
channel = await self.wait_for_dormant_channel()
return channel
async def init_available(self) -> None:
"""Initialise the Available category with channels."""
log.trace("Initialising the Available category with channels.")
channels = list(_channel.get_category_channels(self.available_category))
missing = constants.HelpChannels.max_available - len(channels)
# If we've got fewer than `max_available` channels available, we should add some.
if missing > 0:
log.trace(f"Moving {missing} missing channels to the Available category.")
for _ in range(missing):
await self.move_to_available()
# If for some reason we have more than `max_available` channels available,
# we should move the superfluous ones over to dormant.
elif missing < 0:
log.trace(f"Moving {abs(missing)} superfluous available channels over to the Dormant category.")
for channel in channels[:abs(missing)]:
await self.unclaim_channel(channel)
async def init_categories(self) -> None:
"""Get the help category objects. Remove the cog if retrieval fails."""
log.trace("Getting the CategoryChannel objects for the help categories.")
try:
self.available_category = await channel_utils.try_get_channel(
constants.Categories.help_available
)
self.in_use_category = await channel_utils.try_get_channel(
constants.Categories.help_in_use
)
self.dormant_category = await channel_utils.try_get_channel(
constants.Categories.help_dormant
)
except discord.HTTPException:
log.exception("Failed to get a category; cog will be removed")
self.bot.remove_cog(self.qualified_name)
async def init_cog(self) -> None:
"""Initialise the help channel system."""
log.trace("Waiting for the guild to be available before initialisation.")
await self.bot.wait_until_guild_available()
log.trace("Initialising the cog.")
await self.init_categories()
await _cooldown.check_cooldowns(self.scheduler)
self.channel_queue = self.create_channel_queue()
self.name_queue = _name.create_name_queue(
self.available_category,
self.in_use_category,
self.dormant_category,
)
log.trace("Moving or rescheduling in-use channels.")
for channel in _channel.get_category_channels(self.in_use_category):
await self.move_idle_channel(channel, has_task=False)
# Prevent the command from being used until ready.
# The ready event wasn't used because channels could change categories between the time
# the command is invoked and the cog is ready (e.g. if move_idle_channel wasn't called yet).
# This may confuse users. So would potentially long delays for the cog to become ready.
self.close_command.enabled = True
await self.init_available()
_stats.report_counts()
log.info("Cog is ready!")
async def move_idle_channel(self, channel: discord.TextChannel, has_task: bool = True) -> None:
"""
Make the `channel` dormant if idle or schedule the move if still active.
If `has_task` is True and rescheduling is required, the extant task to make the channel
dormant will first be cancelled.
"""
log.trace(f"Handling in-use channel #{channel} ({channel.id}).")
if not await _message.is_empty(channel):
idle_seconds = constants.HelpChannels.idle_minutes * 60
else:
idle_seconds = constants.HelpChannels.deleted_idle_minutes * 60
time_elapsed = await _channel.get_idle_time(channel)
if time_elapsed is None or time_elapsed >= idle_seconds:
log.info(
f"#{channel} ({channel.id}) is idle longer than {idle_seconds} seconds "
f"and will be made dormant."
)
await self.unclaim_channel(channel)
else:
# Cancel the existing task, if any.
if has_task:
self.scheduler.cancel(channel.id)
delay = idle_seconds - time_elapsed
log.info(
f"#{channel} ({channel.id}) is still active; "
f"scheduling it to be moved after {delay} seconds."
)
self.scheduler.schedule_later(delay, channel.id, self.move_idle_channel(channel))
async def move_to_available(self) -> None:
"""Make a channel available."""
log.trace("Making a channel available.")
channel = await self.get_available_candidate()
log.info(f"Making #{channel} ({channel.id}) available.")
await _message.send_available_message(channel)
log.trace(f"Moving #{channel} ({channel.id}) to the Available category.")
await _channel.move_to_bottom(
channel=channel,
category_id=constants.Categories.help_available,
)
_stats.report_counts()
async def move_to_dormant(self, channel: discord.TextChannel) -> None:
"""Make the `channel` dormant."""
log.info(f"Moving #{channel} ({channel.id}) to the Dormant category.")
await _channel.move_to_bottom(
channel=channel,
category_id=constants.Categories.help_dormant,
)
log.trace(f"Sending dormant message for #{channel} ({channel.id}).")
embed = discord.Embed(description=_message.DORMANT_MSG)
await channel.send(embed=embed)
log.trace(f"Pushing #{channel} ({channel.id}) into the channel queue.")
self.channel_queue.put_nowait(channel)
_stats.report_counts()
@lock.lock_arg(f"{NAMESPACE}.unclaim", "channel")
async def unclaim_channel(self, channel: discord.TextChannel, *, is_auto: bool = True) -> None:
"""
Unclaim an in-use help `channel` to make it dormant.
Unpin the claimant's question message and move the channel to the Dormant category.
Remove the cooldown role from the channel claimant if they have no other channels claimed.
Cancel the scheduled cooldown role removal task.
Set `is_auto` to True if the channel was automatically closed or False if manually closed.
"""
claimant_id = await _caches.claimants.get(channel.id)
_unclaim_channel = self._unclaim_channel
# It is possible that there is no claimant cached. In that case, it would be useless and
# possibly incorrect to lock on None. Therefore, the lock is applied conditionally.
if claimant_id is not None:
decorator = lock.lock_arg(f"{NAMESPACE}.unclaim", "claimant_id", wait=True)
_unclaim_channel = decorator(_unclaim_channel)
return await _unclaim_channel(channel, claimant_id, is_auto)
async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, is_auto: bool) -> None:
"""Actual implementation of `unclaim_channel`. See that for full documentation."""
await _caches.claimants.delete(channel.id)
# Ignore missing tasks because a channel may still be dormant after the cooldown expires.
if claimant_id in self.scheduler:
self.scheduler.cancel(claimant_id)
claimant = self.bot.get_guild(constants.Guild.id).get_member(claimant_id)
if claimant is None:
log.info(f"{claimant_id} left the guild during their help session; the cooldown role won't be removed")
elif not any(claimant.id == user_id for _, user_id in await _caches.claimants.items()):
# Remove the cooldown role if the claimant has no other channels left
await _cooldown.remove_cooldown_role(claimant)
await _message.unpin(channel)
await _stats.report_complete_session(channel.id, is_auto)
await self.move_to_dormant(channel)
# Cancel the task that makes the channel dormant only if called by the close command.
# In other cases, the task is either already done or non-existent.
if not is_auto:
self.scheduler.cancel(channel.id)
async def move_to_in_use(self, channel: discord.TextChannel) -> None:
"""Make a channel in-use and schedule it to be made dormant."""
log.info(f"Moving #{channel} ({channel.id}) to the In Use category.")
await _channel.move_to_bottom(
channel=channel,
category_id=constants.Categories.help_in_use,
)
timeout = constants.HelpChannels.idle_minutes * 60
log.trace(f"Scheduling #{channel} ({channel.id}) to become dormant in {timeout} sec.")
self.scheduler.schedule_later(timeout, channel.id, self.move_idle_channel(channel))
_stats.report_counts()
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
"""Move an available channel to the In Use category and replace it with a dormant one."""
if message.author.bot:
return # Ignore messages sent by bots.
await self.init_task
if channel_utils.is_in_category(message.channel, constants.Categories.help_available):
if not _channel.is_excluded_channel(message.channel):
await self.claim_channel(message)
else:
await _message.check_for_answer(message)
@commands.Cog.listener()
async def on_message_delete(self, msg: discord.Message) -> None:
"""
Reschedule an in-use channel to become dormant sooner if the channel is empty.
The new time for the dormant task is configured with `HelpChannels.deleted_idle_minutes`.
"""
await self.init_task
if not channel_utils.is_in_category(msg.channel, constants.Categories.help_in_use):
return
if not await _message.is_empty(msg.channel):
return
log.info(f"Claimant of #{msg.channel} ({msg.author}) deleted message, channel is empty now. Rescheduling task.")
# Cancel existing dormant task before scheduling new.
self.scheduler.cancel(msg.channel.id)
delay = constants.HelpChannels.deleted_idle_minutes * 60
self.scheduler.schedule_later(delay, msg.channel.id, self.move_idle_channel(msg.channel))
async def wait_for_dormant_channel(self) -> discord.TextChannel:
"""Wait for a dormant channel to become available in the queue and return it."""
log.trace("Waiting for a dormant channel.")
task = asyncio.create_task(self.channel_queue.get())
self.queue_tasks.append(task)
channel = await task
log.trace(f"Channel #{channel} ({channel.id}) finally retrieved from the queue.")
self.queue_tasks.remove(task)
return channel
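# A minimal sketch (assumption: the conventional discord.py extension entry point,
# which normally lives in the package's __init__ rather than this module) of loading the cog:
#   def setup(bot: Bot) -> None:
#       bot.add_cog(HelpChannels(bot))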
| 42.010684
| 120
| 0.670363
| 2,540
| 19,661
| 5.055118
| 0.159843
| 0.017523
| 0.013707
| 0.00662
| 0.195016
| 0.125389
| 0.08053
| 0.047196
| 0.039252
| 0.039252
| 0
| 0.000743
| 0.247088
| 19,661
| 467
| 121
| 42.100642
| 0.866649
| 0.15869
| 0
| 0.123552
| 0
| 0.007722
| 0.181683
| 0.008811
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011583
| false
| 0.003861
| 0.050193
| 0
| 0.111969
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac05bd39a70de6163a586a9ee9a2b3649ee2eef
| 16,516
|
py
|
Python
|
code/menu/screens/shopmenu.py
|
LordZagreus/LodeRunner
|
68aab36be47cabe31e52f3ee43520bdafcdf3c95
|
[
"MIT"
] | 1
|
2017-10-31T22:26:22.000Z
|
2017-10-31T22:26:22.000Z
|
code/menu/screens/shopmenu.py
|
team-sparrow/LodeRunner
|
68aab36be47cabe31e52f3ee43520bdafcdf3c95
|
[
"MIT"
] | 2
|
2019-07-05T03:17:18.000Z
|
2019-07-08T16:15:29.000Z
|
code/menu/screens/shopmenu.py
|
team-sparrow/LodeRunner
|
68aab36be47cabe31e52f3ee43520bdafcdf3c95
|
[
"MIT"
] | 1
|
2020-10-15T09:03:20.000Z
|
2020-10-15T09:03:20.000Z
|
import os
import math
import random
import time
from code.menu.menu import Menu
from code.tools.eventqueue import EventQueue
from code.tools.xml import XMLParser
from code.utils.common import coalesce, intersect, offset_rect, log, log2, xml_encode, xml_decode, translate_rgb_to_string
from code.constants.common import SCREEN_WIDTH, SCREEN_HEIGHT, PAUSE_MENU_X, PAUSE_MENU_Y, PAUSE_MENU_WIDTH, PAUSE_MENU_HEIGHT, MODE_GAME, TILE_WIDTH, TILE_HEIGHT, DIR_UP, DIR_RIGHT, DIR_DOWN, DIR_LEFT, SPLASH_MODE_GREYSCALE_ANIMATED
from code.constants.states import STATUS_ACTIVE, STATUS_INACTIVE, GAME_STATE_ACTIVE, GAME_STATE_NOT_READY
from code.constants.newsfeeder import *
class ShopMenu(Menu):
def __init__(self):
Menu.__init__(self)
# Assume all shop menus come from already-lightboxed dialogues.
self.lightbox_controller.set_interval( self.lightbox_controller.get_target() )
# We're going to keep a handle to the seller so that we can
# remove items from their inventory after a purchase...
self.vendor = None#seller
# Shop title (e.g. "Bob's Fine Items")
self.title = "Shoppe"
# Salutation (e.g. "Look at these great items")
self.message = "Take a look at my inventory."
# Before we begin populating the shop menu, we'll first
# make sure the NPC seller stocks any specified "required" items...
self.required_item_names = []
# Track item quality thresholds (low and high)
self.min_item_quality = 0
self.max_item_quality = 0
# Items in stock at any given time
self.max_items_stocked = 1
# Number of times the vendor can restock
self.max_item_reloads = 1
# Track whether this is the first build or a refresh
self.first_build = True
# Fire build event
self.fire_event("build")
def handle_event(self, event, control_center, universe):#params, user_input, network_controller, universe, active_map, session, widget_dispatcher, text_renderer, save_controller, refresh = False):
# Events that result from event handling
results = EventQueue()
# Convenience
(action, params) = (
event.get_action(),
event.get_params()
)
# Build root menu
if ( action == "build" ):
results.append(
self.handle_build_event(event, control_center, universe)
)
# Select an item, get confirmation...
elif ( action == "show:confirm-purchase" ):
results.append(
self.handle_show_confirm_purchase_event(event, control_center, universe)
)
# Commit an item purchase
elif ( action == "game:buy-item" ):
results.append(
self.handle_shop_buy_item_event(event, control_center, universe)
)
# Go to the previous page (e.g. close buy item confirm dialog)
elif ( action == "back" ):
results.append(
self.handle_back_event(event, control_center, universe)
)
# Finalize a "back" call
elif ( action == "previous-page" ):
# Let's just go back one page
self.page_back(1)
# Leave shop, resume game
elif ( action == "resume-game" ):
results.append(
self.handle_resume_game_event(event, control_center, universe)
)
# Restore the universe to active game state, set this very menu to inactive
elif ( action == "kill" ):
results.append(
self.handle_kill_event(event, control_center, universe)
)
# Return events
return results
# Configure the shop menu (more options than your typical menu, we need to define many parameters)
def configure(self, options):
# Common menu configuration
self.__std_configure__(options)
if ( "vendor" in options ):
self.vendor = options["vendor"]
if ( "title" in options ):
self.title = options["title"]
if ( "message" in options ):
self.message = options["message"]
if ( "required-item-names" in options ):
self.required_item_names.extend( options["required-item-names"] )#.split(";") )
if ( "min-quality" in options ):
self.min_item_quality = int( options["min-quality"] )
if ( "max-quality" in options ):
self.max_item_quality = int( options["max-quality"] )
if ( "max-items" in options ):
self.max_items_stocked = int( options["max-items"] )
if ( "max-reloads" in options ):
self.max_item_reloads = int( options["max-reloads"] )
# For chaining
return self
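# A minimal usage sketch (option values are illustrative) of the chaining enabled
# by the `return self` above:
#   menu = ShopMenu().configure({
#       "vendor": npc,
#       "title": "Bob's Fine Items",
#       "min-quality": 1,
#       "max-quality": 3,
#       "max-items": 5,
#   })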
# Build the shop menu
def handle_build_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Fetch the widget dispatcher
widget_dispatcher = control_center.get_widget_dispatcher()
# Pause the game so that we can shop, if this is the first build...
if (self.first_build):
# Pause
universe.pause()
# Call in the pause splash
control_center.get_splash_controller().set_mode(SPLASH_MODE_GREYSCALE_ANIMATED)
# Before populating the vendor's inventory (or re-populating),
# clear it of any items the player has acquired since last shopping with this vendor...
self.vendor.remove_erstwhile_acquired_items_from_inventory(universe)
# Populate inventory for this shoppe's vendor...
self.vendor.populate_vendor_inventory(
min_quality = self.min_item_quality,#int( node.get_attribute("min-quality") ),
max_quality = self.max_item_quality,#int( node.get_attribute("min-quality") ),
required_item_names = self.required_item_names,
max_items = self.max_items_stocked,#int( node.get_attribute("max-items") ),
max_reloads = self.max_item_reloads,#int( node.get_attribute("max-reloads") ),
universe = universe
)
# Scope
root = None
# Does the vendor have anything in stock? Use this data
# to determine which template we load...
if ( self.vendor.get_vendor_inventory_count() == 0 ):
# Fetch the "nothing in stock" template
template = self.fetch_xml_template( "shop.directory", version = "out-of-items" ).add_parameters({
"@x": xml_encode( "%d" % (SCREEN_WIDTH - (int( (SCREEN_WIDTH - PAUSE_MENU_WIDTH) / 2 ))) ),
"@y": xml_encode( "%d" % PAUSE_MENU_Y ),
"@width": xml_encode( "%d" % int(PAUSE_MENU_WIDTH / 2) ),
"@height": xml_encode( "%d" % PAUSE_MENU_HEIGHT ),
"@shop-title": xml_encode( self.title )
})
# Compile template
root = template.compile_node_by_id("menu")
# We have items to sell...
else:
# Fetch the "shopping directory" template
template = self.fetch_xml_template( "shop.directory", version = "default" ).add_parameters({
"@x": xml_encode( "%d" % (SCREEN_WIDTH - (int( (SCREEN_WIDTH - PAUSE_MENU_WIDTH) / 2 ))) ),
"@y": xml_encode( "%d" % PAUSE_MENU_Y ),
"@width": xml_encode( "%d" % int(PAUSE_MENU_WIDTH / 2) ),
"@height": xml_encode( "%d" % PAUSE_MENU_HEIGHT ),
"@shop-title": xml_encode( self.title ),
"@salutation": xml_encode( self.message )
})
# Compile template
root = template.compile_node_by_id("menu")
# Now we'll add an entry for each available item...
for item_name in self.vendor.get_vendor_inventory_item_names():
# Grab handle
item = universe.get_item_by_name(item_name)
# Validate
if (item):
# How much money do we currently have?
money = int( universe.get_session_variable("core.gold.wallet").get_value() )
# Template version for this item depends on whether we can afford it...
template_version = ( "affordable" if (money >= item.cost) else "unaffordable" )
# Fetch the appropriate template for an individual item
template = self.fetch_xml_template( "shop.directory.insert", version = template_version ).add_parameters({
"@item-name": xml_encode( item.name ),
"@item-title": xml_encode( item.title ),
"@item-cost": xml_encode( "%d" % item.cost ),
"@item-advertisement": xml_encode( item.description )
})
# Compile
node = template.compile_node_by_id("insert")
# Inject into inventory area...
root.find_node_by_id("ext.inventory").add_node(node)
# Create widget
widget = widget_dispatcher.convert_node_to_widget(root, control_center, universe)
widget.set_id("root")
# We have definitely completed the first build now
self.first_build = False
# Add the new page
self.add_widget_via_event(widget, event)
# Return events
return results
# Show the "are you sure you wanna buy this?" page
def handle_show_confirm_purchase_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Fetch the widget dispatcher
widget_dispatcher = control_center.get_widget_dispatcher()
# Get a handle to the actual item...
item = universe.get_item_by_name( params["item-name"] )
# Validate
if (item):
# Fetch confirm purchase template
template = self.fetch_xml_template("shop.buy.confirm").add_parameters({
"@width": xml_encode( "%d" % int(PAUSE_MENU_WIDTH / 2) ),
"@height": xml_encode( "%d" % SCREEN_HEIGHT ),
"@item-name": xml_encode( item.get_name() ),
"@item-title": xml_encode( item.get_title() ),
"@item-cost": xml_encode( "%d" % item.get_cost() )
})
# Compile template
root = template.compile_node_by_id("menu")
# Create widget
widget = widget_dispatcher.convert_node_to_widget(root, control_center, universe)
widget.set_id("confirm-shop-purchase")
# Add the new page
self.add_widget_via_event(widget, event, exclusive = False)
# Return events
return results
# Commit an item purchase
def handle_shop_buy_item_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Get a reference to the item (for cost info, etc.)
item = universe.get_item_by_name( params["item-name"] )
# Acquire the item by its name
universe.acquire_item_by_name( item.get_name() )
# Post a newsfeeder notice
control_center.get_window_controller().get_newsfeeder().post({
"type": NEWS_ITEM_NEW,
"title": control_center.get_localization_controller().get_label("new-item-purchased:header"),
"content": item.get_title()
})
# Add a historical record
universe.add_historical_record(
"purchases",
control_center.get_localization_controller().get_label(
"purchased-m-from-n-for-g:message",
{
"@m": item.get_title(),
"@n": self.vendor.nick,
"@g": item.get_cost()
}
)
#"Bought [color=special]%s[/color] for [color=special]%s[/color] gold." % ( item.get_title(), item.get_cost() )
)
# Remove from seller's inventory
self.vendor.remove_item_from_vendor_inventory( item.get_name() )
# Increase sales count for vendor
self.vendor.increase_sales_count(1)
# Reduce player's wallet amount by the cost...
universe.increment_session_variable(
"core.gold.wallet",
-1 * item.get_cost()
)
# Count as gold spent
universe.increment_session_variable(
"stats.gold-spent",
item.get_cost()
)
# Execute the "wallet-changed" achievement hook
universe.execute_achievement_hook( "wallet-changed", control_center )
# Increase universe stats for items bought
universe.get_session_variable("stats.items-bought").increment_value(1)
# Execute the "bought-item" achievement hook
universe.execute_achievement_hook( "bought-item", control_center )
# Get the active map
m = universe.get_active_map()
# Check for a generic "onpurchase" script for the vendor
m.run_script(
"%s.onpurchase" % self.vendor.get_name(),
control_center,
universe,
execute_all = True # Try to loop entire script (?)
)
# Check for an onpurchase script (perhaps the game reacts in some way to an item you might have bought)
m.run_script(
name = "%s.onpurchase" % item.get_name(),
control_center = control_center,
universe = universe,
execute_all = True
)
# Refresh UI
self.refresh_pages(control_center, universe, curtailed_count = 1)
# After rebuilding the UI, we will have restocked the NPC's inventory.
# Thus, if the NPC has no inventory available, we have just bought their last item...
if ( self.vendor.get_vendor_inventory_count() == 0 ):
# Execute the "bought-all-items" achievement hook
universe.execute_achievement_hook( "bought-all-items", control_center )
# I'm going to set the cursor at "home" position for the shop
self.get_widget_by_id("root").set_cursor_at_beginning()#finalize = True)
# Return events
return results
# Go back a page (animated)
def handle_back_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Get the active page
page = self.get_active_page()
# Validate
if (page):
# Dismiss the page
page.hide(
on_complete = "previous-page"
)
# Return events
return results
# Leave the shop and resume play
def handle_resume_game_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Dismiss lightbox effect
self.lightbox_controller.set_target(0)
# Dismiss the splash controller, calling to resume game action once done...
control_center.get_splash_controller().dismiss(
on_complete = "game:unpause"
)
#hmenu.slide(DIR_LEFT, percent = 1.0)
#row_menu.slide(DIR_RIGHT, percent = 1.0)
# Resume game, killing shop menu when widget disappears
self.get_widget_by_id("root").hide(
on_complete = "kill"
)
# Return events
return results
# Kill event. Set game status back to active when shopping is done.
def handle_kill_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Done with the shop menu widget; trash it.
self.set_status(STATUS_INACTIVE)
# Return events
return results
| 31.339658
| 233
| 0.60069
| 1,928
| 16,516
| 4.946058
| 0.194502
| 0.039534
| 0.039639
| 0.035445
| 0.356544
| 0.27716
| 0.256292
| 0.221791
| 0.205852
| 0.171456
| 0
| 0.001921
| 0.306491
| 16,516
| 526
| 234
| 31.39924
| 0.830627
| 0.273311
| 0
| 0.261062
| 0
| 0
| 0.075
| 0.010101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039823
| false
| 0
| 0.048673
| 0
| 0.128319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac069f3cef035db6da504010b64c5c2110dea99
| 3,665
|
py
|
Python
|
lib/bridgedb/runner.py
|
liudonghua123/bridgedb
|
94dd10673f9e6650e8a00e162f348e64f7a1ecab
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
lib/bridgedb/runner.py
|
liudonghua123/bridgedb
|
94dd10673f9e6650e8a00e162f348e64f7a1ecab
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
lib/bridgedb/runner.py
|
liudonghua123/bridgedb
|
94dd10673f9e6650e8a00e162f348e64f7a1ecab
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 ; test-case-name: bridgedb.test.test_runner -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]>
# please also see AUTHORS file
# :copyright: (c) 2007-2015, The Tor Project, Inc.
# (c) 2007-2015, all entities within the AUTHORS file
# (c) 2012-2015, Isis Lovecruft
# :license: 3-clause BSD, see included LICENSE for information
"""Classes for running components and servers, as well as daemonisation.
** Module Overview: **
"""
from __future__ import print_function
import logging
import sys
import os
from twisted.python import procutils
def find(filename):
"""Find the executable ``filename``.
:param string filename: The executable to search for. Must be in the
effective user ID's $PATH.
:rtype: string
:returns: The location of the executable, if found. Otherwise, returns
None.
"""
executable = None
logging.debug("Searching for installed '%s'..." % filename)
which = procutils.which(filename, os.X_OK)
if len(which) > 0:
for that in which:
if os.stat(that).st_uid == os.geteuid():
executable = that
break
if not executable:
return None
logging.debug("Found installed script at '%s'" % executable)
return executable
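# Usage sketch (the resulting path is illustrative):
#   path = find('leekspin')  # e.g. '/usr/local/bin/leekspin' if owned by the
#                            # effective UID, otherwise None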
def generateDescriptors(count=None, rundir=None):
"""Run a script which creates fake bridge descriptors for testing purposes.
This will run Leekspin_ to create bridge server descriptors, bridge
extra-info descriptors, and a networkstatus document.
.. warning: This function can take a very long time to run, especially in
headless environments where entropy sources are minimal, because it
creates the keys for each mocked OR, which are embedded in the server
descriptors, used to calculate the OR fingerprints, and sign the
descriptors, among other things.
.. _Leekspin: https://gitweb.torproject.org/user/isis/leekspin.git
:param integer count: Number of mocked bridges to generate descriptor
for. (default: 3)
:type rundir: string or None
:param rundir: If given, use this directory as the current working
directory for the bridge descriptor generator script to run in. The
directory MUST already exist, and the descriptor files will be created
in it. If None, use whatever directory we are currently in.
"""
import subprocess
import os.path
proc = None
statuscode = 0
script = 'leekspin'
rundir = rundir if os.path.isdir(rundir) else None
count = count if count else 3
try:
proc = subprocess.Popen([script, '-n', str(count)],
close_fds=True, cwd=rundir)
finally:
if proc is not None:
proc.wait()
if proc.returncode:
print("There was an error generating bridge descriptors.",
"(Returncode: %d)" % proc.returncode)
statuscode = proc.returncode
else:
print("Sucessfully generated %s descriptors." % str(count))
del subprocess
return statuscode
def doDumpBridges(config):
"""Dump bridges by assignment to a file.
This function handles the commandline '--dump-bridges' option.
:type config: :class:`bridgedb.Main.Conf`
:param config: The current configuration.
"""
import bridgedb.Bucket as bucket
bucketManager = bucket.BucketManager(config)
bucketManager.assignBridgesToBuckets()
bucketManager.dumpBridges()
| 33.318182
| 79
| 0.661937
| 452
| 3,665
| 5.34292
| 0.477876
| 0.016149
| 0.007453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013965
| 0.257572
| 3,665
| 109
| 80
| 33.623853
| 0.873576
| 0.53397
| 0
| 0
| 0
| 0
| 0.110261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.177778
| 0
| 0.311111
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac297a5895de04303f5fe688063a599cff885d4
| 4,053
|
py
|
Python
|
batch_processing_dataflow/play_store_flow.py
|
KeeplerIO/meetup-hands-on-gcp-2019
|
3674922d89d2be8984eb5719f0faaae127823ab4
|
[
"MIT"
] | 1
|
2019-04-03T17:47:04.000Z
|
2019-04-03T17:47:04.000Z
|
batch_processing_dataflow/play_store_flow.py
|
KeeplerIO/meetup-hands-on-gcp-2019
|
3674922d89d2be8984eb5719f0faaae127823ab4
|
[
"MIT"
] | 2
|
2020-08-10T10:52:57.000Z
|
2022-01-22T04:18:42.000Z
|
batch_processing_dataflow/play_store_flow.py
|
KeeplerIO/meetup-hands-on-gcp-2019
|
3674922d89d2be8984eb5719f0faaae127823ab4
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import apache_beam as beam
from apache_beam.io import WriteToBigQuery
from apache_beam.io import ReadFromText, WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
class ProcessCSV(beam.DoFn):
def process(self, element, *args, **kwargs):
import csv
formatted_element = [element.encode('utf8')]
processed_csv = csv.DictReader(formatted_element, fieldnames=['App', 'Category', 'Rating', 'Reviews', 'Size',
'Installs', 'Type', 'Price', 'Content_Rating',
'Genres', 'Last_Updated', 'Current_Ver',
'Android_Ver'], delimiter=',')
processed_fields = processed_csv.next()
if processed_fields.get('Category').replace('.','').isdigit():
return None
return [processed_fields]
class ParseRecord(beam.DoFn):
def process(self, element, *args, **kwargs):
from datetime import datetime
import math
def string_to_megabyte(raw_string):
if raw_string.upper().endswith('K'):
multiplier = 1000
elif raw_string.upper().endswith('M'):
multiplier = 1000 * 1000
else:
return None
return (float(raw_string[:-1]) * multiplier) / 1000000
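# Worked examples of the helper above: string_to_megabyte('19M') -> 19.0,
# string_to_megabyte('512k') -> 0.512, and values without a K/M suffix
# (e.g. 'Varies with device') -> None.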
new_element = {}
rating = float(element['Rating'])
new_element['Rating'] = rating if not math.isnan(rating) else None
new_element['Size'] = string_to_megabyte(element['Size'])
new_element['Price'] = float(element['Price'].replace("$",""))
new_element['Installs'] = int(element['Installs'].replace("+", "").replace(",",""))
new_element['Last_Updated'] = datetime.strptime(element['Last_Updated'], '%B %d, %Y').strftime('%Y-%m-%d')
new_element['Category'] = element['Category']
new_element['Genres'] = element['Genres']
new_element['App'] = element['App']
new_element['Content_Rating'] = element['Content_Rating']
new_element['Reviews'] = element['Reviews']
new_element['Android_Ver'] = element['Android_Ver']
new_element['Type'] = element['Type']
new_element['Current_Ver'] = element['Current_Ver']
logging.info(new_element)
return [new_element]
def run(argv=None):
"""Main entry point. It defines and runs the pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://meetup-batch-processing/input/googleplaystore.csv',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
default='gs://meetup-batch-processing/output/googleplaystore.csv',
help='Output file to process.')
parser.add_argument('--table-output',
dest='table_output',
default='meetup-hands-on-gcp-2019:googleplaystore_batch_dataflow.play_store',
help='Bigquery table name for output.')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
with beam.Pipeline(options=pipeline_options) as pipeline:
raw_lines = pipeline | 'ReadFromCsv' >> ReadFromText(known_args.input, skip_header_lines=1)
lines = raw_lines | 'processCsv' >> beam.ParDo(ProcessCSV())
output = lines | 'parseRecord' >> beam.ParDo(ParseRecord())
output | 'writeBigQuery' >> WriteToBigQuery(known_args.table_output,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER)
logging.info('Finished.')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
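# A minimal sketch of launching this pipeline on Dataflow (the runner, project and
# temp_location flags are standard Beam pipeline options, shown with assumed values):
#   python play_store_flow.py \
#       --runner DataflowRunner \
#       --project meetup-hands-on-gcp-2019 \
#       --temp_location gs://meetup-batch-processing/tmp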
| 42.663158
| 116
| 0.593881
| 406
| 4,053
| 5.729064
| 0.334975
| 0.068788
| 0.018057
| 0.013758
| 0.104041
| 0.059329
| 0.033534
| 0.033534
| 0
| 0
| 0
| 0.008895
| 0.278806
| 4,053
| 94
| 117
| 43.117021
| 0.786863
| 0.012583
| 0
| 0.054054
| 0
| 0
| 0.174718
| 0.043805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.121622
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac30849631c3b7df115a92dba1c94f0bb05ed26
| 4,259
|
py
|
Python
|
backend/main/server/resources/Message.py
|
Manotomo-Alliance-Support-Squad/WWS
|
3df21a3f715eeb3b57314bf08c38f2239b2ba399
|
[
"MIT"
] | null | null | null |
backend/main/server/resources/Message.py
|
Manotomo-Alliance-Support-Squad/WWS
|
3df21a3f715eeb3b57314bf08c38f2239b2ba399
|
[
"MIT"
] | 20
|
2021-03-15T20:30:35.000Z
|
2021-06-02T19:16:55.000Z
|
backend/main/server/resources/Message.py
|
Manotomo-Alliance-Support-Squad/WWS
|
3df21a3f715eeb3b57314bf08c38f2239b2ba399
|
[
"MIT"
] | null | null | null |
from flask import request
from flask_jwt import jwt_required
from flask_restful import Resource
from main.server import app, cache, db
from main.server.models import Message, MessageSchema
messages_schema = MessageSchema(many=True)
message_schema = MessageSchema()
@app.after_request
def add_header(response):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST'
response.headers[
'Access-Control-Allow-Headers'] = 'Access-Control-Allow-Headers, Origin,Accept, X-Requested-With, Content-Type, Access-Control-Request-Method, Access-Control-Request-Headers'
return response
class MessageCount(Resource):
@cache.cached(timeout=100)
def get(self):
"""Gets the number of messages available on the server"""
return {'status': 'success', 'count': Message.query.count()}, 200
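# Illustrative response body (the count value is made up):
# {"status": "success", "count": 42}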
class MessageListRangeResource(Resource):
@cache.cached(timeout=100)
def get(self, lower, upper):
"""Gets a range of messages on the server"""
if int(lower) < 1:
return {'status': 'fail', 'messages': 'Invalid index: ' + str(lower)}, 400
if int(lower) > int(upper):
return {'status': 'fail',
'messages': 'Upper range cannot be less than lower range: ' + str(lower) + '>' + str(upper)}, 400
messages = Message.query.filter(Message.messageID >= int(lower)).filter(Message.messageID <= int(upper))
if not messages:
return {'status': 'fail',
'messages': 'Out of range: ' + str(lower) + ' - ' + str(upper) + ' does not exist'}, 404
messages = messages_schema.dump(messages)
if not Message.query.filter_by(messageID=upper).first(): # the last item in the range
return {'status': 'success', 'messages': messages}, 206 # Partial Content Served
return {'status': 'success', 'messages': messages}, 200
class MessageListResource(Resource):
@cache.cached(timeout=100)
def get(self):
"""Gets all messages on the server"""
messages = Message.query.all()
messages = messages_schema.dump(messages)
if not messages:
return {'status': 'success', 'messages': messages}, 206 # Partial Content Served
return {'status': 'success', 'messages': messages}, 200
@jwt_required()
def post(self):
"""Add message"""
json_data = request.get_json(force=True)
if not json_data:
return {'status': 'fail', 'message': 'No input data'}, 400
errors = message_schema.validate(json_data)
if errors:
return {'status': 'fail', 'message': 'Error handling request'}, 422
data = message_schema.load(json_data)
message = Message.query.filter_by(orig_msg=data.get('orig_msg')).first()
if message:
return {'status': 'fail', 'message': 'Message already exists'}, 400
message = Message(orig_msg=data.get('orig_msg'),
tl_msg=data.get('tl_msg'),
country=data.get('country'),
username=data.get('username'))
db.session.add(message)
db.session.commit()
return {'status': 'success', 'message': 'Message successfully created'}, 201
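# A sketch of a JSON body the POST handler above accepts (field names follow the
# schema fields read via data.get(...); the values are illustrative):
#   {"orig_msg": "...", "tl_msg": "...", "country": "JP", "username": "fan01"}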
class MessageResource(Resource):
@cache.cached(timeout=100)
def get(self, messageID):
""""Get a message by message ID"""
message = Message.query.filter_by(messageID=messageID)
if not message.first():
return {'status': 'fail', 'message': 'No message with ID ' + str(messageID) + ' exists'}, 404
message = messages_schema.dump(message)
return {'status': 'success', 'messages': message}, 200
@jwt_required()
def delete(self, messageID):
"""delete a message by ID"""
message = Message.query.filter_by(messageID=messageID)
if not message.first():
return {'status': 'fail', 'message': 'No message with ID ' + str(messageID) + ' exists'}, 404
message.delete()
db.session.commit()
return {'status': 'success', 'message': 'Message Deleted'}, 200
| 35.789916
| 182
| 0.621273
| 487
| 4,259
| 5.373717
| 0.250513
| 0.073366
| 0.048911
| 0.047765
| 0.42415
| 0.281238
| 0.265189
| 0.235384
| 0.205579
| 0.172717
| 0
| 0.01885
| 0.240197
| 4,259
| 118
| 183
| 36.09322
| 0.789864
| 0.061047
| 0
| 0.333333
| 0
| 0.012821
| 0.214322
| 0.051437
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089744
| false
| 0
| 0.064103
| 0
| 0.423077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac35d88701fa7c3171d4b1e9eb134859f289cd2
| 5,380
|
py
|
Python
|
volttron/platform/vip/agent/subsystems/heartbeat.py
|
rmay-intwine/volttron
|
a449f70e32f73ff0136a838d0feddb928ede6298
|
[
"Apache-2.0"
] | null | null | null |
volttron/platform/vip/agent/subsystems/heartbeat.py
|
rmay-intwine/volttron
|
a449f70e32f73ff0136a838d0feddb928ede6298
|
[
"Apache-2.0"
] | null | null | null |
volttron/platform/vip/agent/subsystems/heartbeat.py
|
rmay-intwine/volttron
|
a449f70e32f73ff0136a838d0feddb928ede6298
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import os
import weakref
from datetime import datetime
from .base import SubsystemBase
from volttron.platform.messaging.headers import TIMESTAMP
from volttron.platform.agent.utils import (get_aware_utc_now,
format_timestamp)
from volttron.platform.scheduling import periodic
from ..errors import Unreachable, VIPError
"""The heartbeat subsystem adds an optional periodic publish to all agents.
Heartbeats can be started with agents and toggled on and off at runtime.
"""
__docformat__ = 'reStructuredText'
__version__ = '1.0'
class Heartbeat(SubsystemBase):
def __init__(self, owner, core, rpc, pubsub, heartbeat_autostart,
heartbeat_period):
self.owner = owner
self.core = weakref.ref(core)
self.pubsub = weakref.ref(pubsub)
self.autostart = heartbeat_autostart
self.period = heartbeat_period
self.enabled = False
self.connect_error = False
def onsetup(sender, **kwargs):
rpc.export(self.start, 'heartbeat.start')
rpc.export(self.start_with_period, 'heartbeat.start_with_period')
rpc.export(self.stop, 'heartbeat.stop')
rpc.export(self.restart, 'heartbeat.restart')
rpc.export(self.set_period, 'heartbeat.set_period')
def onstart(sender, **kwargs):
if self.autostart:
self.start()
core.onsetup.connect(onsetup, self)
core.onstart.connect(onstart, self)
core.onconnected.connect(self.reconnect)
def start(self):
"""RPC method
Starts an agent's heartbeat.
"""
if not self.enabled:
self.scheduled = self.core().schedule(periodic(self.period), self.publish)
self.enabled = True
def start_with_period(self, period):
"""RPC method
Set period and start heartbeat.
:param period: Time in seconds between publishes.
"""
self.set_period(period)
self.start()
def reconnect(self, sender, **kwargs):
if self.connect_error:
self.restart()
self.connect_error = False
def stop(self):
"""RPC method
Stop an agent's heartbeat.
"""
if self.enabled:
# Trap the fact that scheduled may not have been
# set yet if the start hasn't been called.
try:
self.scheduled.cancel()
except AttributeError:
pass
self.enabled = False
def restart(self):
"""RPC method
Restart the heartbeat with the current period. The heartbeat will
immediately begin publishing to the message bus.
"""
self.stop()
self.start()
def set_period(self, period):
"""RPC method
Set heartbeat period.
:param period: Time in seconds between publishes.
"""
if self.enabled:
self.stop()
self.period = period
self.start()
else:
self.period = period
def publish(self):
topic = 'heartbeat/' + self.core().identity
headers = {TIMESTAMP: format_timestamp(get_aware_utc_now())}
message = self.owner.vip.health.get_status_value()
try:
self.pubsub().publish('pubsub', topic, headers, message)
except Unreachable as exc:
self.connect_error = True
self.stop()
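# For illustration: an agent whose identity is 'exampleagent-1.0_1' (hypothetical)
# publishes to topic 'heartbeat/exampleagent-1.0_1' with a TIMESTAMP header and its
# current health status as the message body.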
| 34.487179
| 86
| 0.666729
| 669
| 5,380
| 5.301943
| 0.38864
| 0.019735
| 0.025374
| 0.028193
| 0.113617
| 0.081195
| 0.0468
| 0.024246
| 0
| 0
| 0
| 0.005987
| 0.254833
| 5,380
| 155
| 87
| 34.709677
| 0.878773
| 0.429368
| 0
| 0.242857
| 0
| 0
| 0.046647
| 0.00984
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.014286
| 0.114286
| 0
| 0.271429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac367d8d5ec9f368f230df751f19e5799e20bdd
| 18,984
|
py
|
Python
|
datasets/experimental/ni_superalloys/Ni_superalloy.py
|
kyawlin/smlb
|
79c757d7fc040fb30ad44410be158b3ce3bdf30d
|
[
"Apache-2.0"
] | null | null | null |
datasets/experimental/ni_superalloys/Ni_superalloy.py
|
kyawlin/smlb
|
79c757d7fc040fb30ad44410be158b3ce3bdf30d
|
[
"Apache-2.0"
] | null | null | null |
datasets/experimental/ni_superalloys/Ni_superalloy.py
|
kyawlin/smlb
|
79c757d7fc040fb30ad44410be158b3ce3bdf30d
|
[
"Apache-2.0"
] | null | null | null |
"""Ni-Superalloy dataset.
Scientific Machine Learning Benchmark
A benchmark of regression models in chem- and materials informatics.
2019, Brendan Folie, Citrine Informatics.
See class NiSuperalloyDataset for details.
"""
import os
import json
import zipfile
from typing import List, Optional, Tuple, Union
import numpy as np
from smlb.exceptions import InvalidParameterError
from smlb.parameters import params
from smlb.tabular_data import TabularData
class NiSuperalloyDataset(TabularData):
"""
Ni-Superalloy dataset.
Based on:
Bryce D. Conduit, Nicholas G. Jones, Howard J. Stone, Gareth John Conduit:
Design of a nickel-base superalloy using a neural network, Materials & Design 131: 358-365,
Elsevier, 2017. DOI 10.1016/j.matdes.2017.06.007
The dataset was downloaded from the Citrination platform (https://citrination.com),
dataset identifier #153493, Version 10.
There are 2800 rows.
The data have columns for composition (25 elements are present in at least one row),
whether the alloy was powder processed (0 or 1), whether it was pressure treated (0 or 1),
heat treatment time (hours) and temperature (degrees Celsius) for up to 4 heat treatment steps,
the total time spent in heat treatment (hours), the maximum heat treatment temperature
(degrees Celsius), and the area under the time-temperature curve (degrees Celsius * hours).
A value of 0 generally implies that the heat treatment step was not done, but there
are some missing values. The total time and max temperature are generally more reliable
than the individual heating steps. The total compositions do not always add up to 100%,
but with about a dozen exceptions they always add up to somewhere between 95% and 105%.
There are also three columns for a pressure treatment step (temperature, time, pressure),
but since only 51 rows have non-zero entries, this information is not used.
There are 5 labels: ultimate tensile strength (MPa), elongation (unitless), stress rupture
stress (MPa), stress rupture time (hours), and yield strength (MPa). Tensile strength and
elongation occur together in 898 rows, stress rupture stress and time occur together in
856 rows, and yield strength occurs in 1046 rows. 898+856+1046=2800, so every row has exactly
one output set. The other values are denoted as NaN.
"""
DEFAULT_PATH = os.path.split(os.path.realpath(__file__))[0] + "/ni_superalloys_3.json.zip"
POWDER_PROCESSED_NO = 0
POWDER_PROCESSED_YES = 1
def __init__(
self, labels_to_load: Optional[Union[str, List[str]]] = None, ignore_dubious: bool = False
):
"""Initialize Ni-superalloy dataset with specified labels.
Parameters:
labels_to_load (str or List[str]): which labels to load. Options are
'Yield Strength', 'Ultimate Tensile Strength', 'Stress Rupture Time',
'Stress Rupture Stress', and 'Elongation'.
If None, then all labels are loaded.
ignore_dubious (bool): whether to drop samples flagged as questionable
(currently, those whose composition contains an asterisk)
"""
labels_to_load = params.optional_(
labels_to_load,
lambda arg: params.any_(
arg, params.string, lambda arg: params.sequence(arg, type_=str),
),
)
ignore_dubious = params.boolean(ignore_dubious)
filepath = self.DEFAULT_PATH
data, labels = self._load_data_and_labels(filepath, labels_to_load, ignore_dubious)
super().__init__(data=data, labels=labels)
def _load_data_and_labels(
self,
filepath: str,
labels_to_load: Optional[List[str]] = None,
ignore_dubious: bool = False,
):
"""Load data and labels from .json file."""
raw = self._unzip_json_file(filepath)
if ignore_dubious:
raw = [e for e in raw if self._filter_dubious(e)]
# dtype=object is necessary because this is a mixed-type array (float and string)
data = np.array([self._parse_json_data(e) for e in raw], dtype=object)
labels = np.array([self._parse_json_labels(e, labels_to_load) for e in raw], dtype=float)
return data, labels
@staticmethod
def _unzip_json_file(filepath: str):
"""Open and read zipped json file."""
filename = os.path.basename(filepath)
assert (
filename[-4:] == ".zip"
), f"File path must point to a .zip file, instead got '{filepath}'"
with zipfile.ZipFile(filepath) as zf:
unzipped_filename = filename[:-4]
with zf.open(unzipped_filename) as fp:
raw = json.load(fp)
return raw
@staticmethod
def _extract_raw_composition(entry: dict) -> List[dict]:
"""Get composition in its raw form."""
raw_composition = entry.get("composition")
if raw_composition is None or not isinstance(raw_composition, list):
raise InvalidParameterError(
expected="Chemical composition as a list", got=raw_composition
)
return raw_composition
@staticmethod
def _filter_dubious(entry: dict) -> bool:
"""
Determine whether a json entry passes the dubious-sample filter.
Currently, the only thing filtered on is whether the composition has an asterisk in it,
which occurs for 6 samples.
Parameters:
entry (dict): A json entry corresponding to a row in the dataset.
Returns: bool
True if the composition contains no asterisk, i.e., the entry should be kept.
"""
raw_composition = NiSuperalloyDataset._extract_raw_composition(entry)
composition_dict = NiSuperalloyDataset._parse_composition_as_dict(raw_composition)
composition_dict_float, exception_caught = NiSuperalloyDataset._dict_values_to_float(
composition_dict
)
return not exception_caught
def _parse_json_data(self, entry: dict):
"""
Helper function to parse data in a single row from the raw json.
Parameters:
entry (dict): A json entry corresponding to a row in the dataset.
Returns: array
Array of data in this row.
"""
assert entry["category"] == "system.chemical"
raw_composition = NiSuperalloyDataset._extract_raw_composition(entry)
composition: str = self._parse_composition(raw_composition)
properties = entry.get("properties")
if properties is None or not isinstance(properties, list):
raise InvalidParameterError(
expected="A list of dictionaries, one for each property", got=properties
)
heat_treatment_1_time = self._get_scalar_property(
properties, "Heat treatment 1 Time", units="hours", default_value=0
)
heat_treatment_1_temp = self._get_scalar_property(
properties, "Heat treatment 1 Temperature", units="$^{\\circ}$C", default_value=0
)
heat_treatment_2_time = self._get_scalar_property(
properties, "Heat treatment 2 Time", units="hours", default_value=0
)
heat_treatment_2_temp = self._get_scalar_property(
properties, "Heat treatment 2 Temperature", units="$^{\\circ}$C", default_value=0
)
heat_treatment_3_time = self._get_scalar_property(
properties, "Heat treatment 3 Time", units="hours", default_value=0
)
heat_treatment_3_temp = self._get_scalar_property(
properties, "Heat treatment 3 Temperature", units="$^{\\circ}$C", default_value=0
)
heat_treatment_4_time = self._get_scalar_property(
properties, "Heat treatment 4 Time", units="hours", default_value=0
)
heat_treatment_4_temp = self._get_scalar_property(
properties, "Heat treatment 4 Temperature", units="$^{\\circ}$C", default_value=0
)
total_heat_treatment_time = self._get_scalar_property(
properties, "Total heat treatment time", units="hours"
)
max_heat_treatment_temp = self._get_scalar_property(
properties, "Max Heat Treatment Temperature", units="$^{\\circ}$C"
)
area_under_heat_treatment_curve = self._get_scalar_property(
properties, "Area under heat treatment curve", units="$^{\\circ}$C * hours"
)
powder_processed_dict = {"No": self.POWDER_PROCESSED_NO, "Yes": self.POWDER_PROCESSED_YES}
powder_processed = self._get_categorical_property(
properties, "Powder processed", categories_dict=powder_processed_dict
)
data_array = [
composition,
heat_treatment_1_time,
heat_treatment_1_temp,
heat_treatment_2_time,
heat_treatment_2_temp,
heat_treatment_3_time,
heat_treatment_3_temp,
heat_treatment_4_time,
heat_treatment_4_temp,
total_heat_treatment_time,
max_heat_treatment_temp,
area_under_heat_treatment_curve,
powder_processed,
]
return data_array
def _parse_json_labels(self, entry: dict, labels_to_load: Optional[List[str]] = None):
"""
Helper function to parse labels in a single row from the raw json.
Parameters:
entry (dict): A json entry corresponding to a row in the dataset.
labels_to_load (List[str]): Optional list of labels to load.
Returns: array
Array of labels in this row that we are interested in.
"""
if labels_to_load is None:
labels_to_load = [
"Yield Strength",
"Ultimate Tensile Strength",
"Stress Rupture Time",
"Stress Rupture Stress",
"Elongation",
]
properties = entry.get("properties")
if properties is None or not isinstance(properties, list):
raise InvalidParameterError(
expected="A list of dictionaries, one for each property", got=properties
)
labels_array = []
for label in labels_to_load:
labels_array.append(self._get_scalar_property(properties, label, default_value=None))
return labels_array
@staticmethod
def _parse_composition(raw_composition: List[dict]) -> str:
"""
Helper function to parse composition as a string.
Parameters:
raw_composition (List[dict]): A list, each entry of which corresponds to an element.
An entry is a dict with an 'element' key and an 'idealWeightPercent' key.
The element is a string (e.g., 'Cu') and the weight percent is another dict
with a single key, 'value', pointing to a floating point number.
The values are in percentage points, and add up to ~100.
Returns: str
Chemical composition as string, e.g. 'Al5.5Ni94.0W0.5'
"""
composition_dict = NiSuperalloyDataset._parse_composition_as_dict(raw_composition)
composition_dict_float, _ = NiSuperalloyDataset._dict_values_to_float(composition_dict)
composition_str: str = ""
for element_name, element_amount in composition_dict_float.items():
if element_amount > 0:
composition_str += element_name + str(element_amount)
return composition_str
@staticmethod
def _parse_composition_as_dict(raw_composition: List[dict]) -> dict:
"""
Helper function to parse composition as a dictionary.
Parameters:
raw_composition (List[dict]): A list, each entry of which corresponds to an element.
An entry is a dict with an 'element' key and an 'idealWeightPercent' key.
The element is a string (e.g., 'Cu') and the weight percent is another dict
with a single key, 'value', pointing to a floating point number.
The values are in percentage points, and add up to ~100 (but not exactly).
Returns: dict
Chemical composition as a dictionary with the elements as keys
and their raw amounts as values
"""
composition_dict = dict()
for entry in raw_composition:
try:
element_name = entry["element"]
element_amount = entry["idealWeightPercent"]["value"]
except KeyError:
raise InvalidParameterError(
expected="Element amount as a dictionary of the form\n"
"{'element': <element name>,"
"'idealWeightPercent': "
"{'value': <element amount>}}",
got=entry,
)
composition_dict[element_name] = element_amount
return composition_dict
@staticmethod
def _dict_values_to_float(d: dict) -> Tuple[dict, bool]:
"""
Convert a dictionary's values to their floating point representations, if possible.
Parameters:
d: a dictionary
Returns: dict, bool
A modified version of `d`, and a boolean flag indicating whether or not
an Exception was caught
"""
d_copy = dict()
exception_caught = False
for key, value in d.items():
try:
value_float = float(value)
except ValueError:
exception_caught = True
value_float = NiSuperalloyDataset._parse_peculiar_amount(value)
d_copy[key] = value_float
return d_copy, exception_caught
@staticmethod
def _parse_peculiar_amount(x: str) -> float:
"""
Deals with dataset-specific peculiarities in composition amounts.
Some composition amounts have a trailing asterisk, e.g., '2*'. The meaning is unclear.
Perhaps it denotes that the amount is imprecise. In any case, they only occur in 6
samples. The trailing asterisk will be ignored.
"""
if x[-1] == "*":
x = x[:-1]
try:
return float(x)
except ValueError:
raise InvalidParameterError("Amount as a float", x)
def _get_scalar_property(
self,
properties: List[dict],
property_name: str,
units: Optional[str] = None,
default_value: Optional[float] = None,
) -> Optional[float]:
"""
A helper function to get a single scalar property.
This calls _get_single_property and then checks that the result can be
turned into a float.
Parameters:
properties: A list of dicts, each of which is a single property.
property_name: The name of the property to get the value of.
units: Optional expected units string.
default_value: Value to return if `property_name` is not present.
Raises:
InvalidParameterError: if the value cannot be expressed as a float
Returns: float or None
The value of the desired property, or the default value (which may be None) if the property is absent.
"""
try:
val = self._get_single_property(properties, property_name, units, default_value)
if val is None:
return None
return float(val)
except ValueError:
raise InvalidParameterError(
expected=f"Property {property_name} should have a value "
f"that can be expressed as a float",
got=properties,
)
def _get_categorical_property(
self, properties: List[dict], property_name: str, categories_dict: dict
) -> int:
"""
Helper function to get a single categorical property as an int.
Parameters:
properties: A list of dicts, each of which is a single property.
property_name: The name of the property to get the value of.
categories_dict: Dict from the categorical property (string) to a unique integer value.
Raises:
InvalidParameterError: if the value is not in the expected list of possible categories
as given by the keys in `categories_dict`
Returns: int
An integer that corresponds to the value of the desired property.
"""
category = self._get_single_property(properties, property_name)
try:
return categories_dict[category]
except KeyError:
raise InvalidParameterError(
f"A value in the array: {categories_dict.keys()}", category
)
@staticmethod
def _get_single_property(
properties: List[dict], property_name: str, units: Optional[str] = None, default_value=None
):
"""
Helper function to get a single property.
Parameters:
properties: A list of dicts, each of which is a single property. Each entry is expected
to have a 'name' field that corresponds to the property name and a `scalars` field
that is a list with one entry, a dict of the form {'value': <property value>}.
It may also have a 'units' field.
property_name: The name of the property to get the value of. `properties` is expected
to have exactly one entry with the 'name' field equal to `property_name`.
units: Optional expected value of 'units' field. If specified, then there must be a
'units' field and its value must correspond to `units`.
default_value: Value to return if `property_name` is not present.
Raises:
InvalidParameterError: if `properties` does not conform to the expected structure
Returns:
The value of the property `property_name`
"""
matching_props = [prop for prop in properties if prop.get("name") == property_name]
if len(matching_props) == 0:
return default_value
elif len(matching_props) > 1:
raise InvalidParameterError(
expected=f"Only one entry in properties should have name" f" '{property_name}'",
got=properties,
)
matching_prop = matching_props[0]
try:
scalars = matching_prop["scalars"]
assert len(scalars) == 1
val = scalars[0]["value"]
if units is not None:
assert matching_prop["units"] == units
except (KeyError, AssertionError):
units_str = "" if units is None else f", 'units': {units}"
raise InvalidParameterError(
expected="Property as a dictionary of the form\n"
"{'name': <property name>, 'scalars': "
"[{'value': <property value>}]" + units_str + "}",
got=matching_prop,
)
return val
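The composition parsing above walks a list of `{'element': ..., 'idealWeightPercent': {'value': ...}}` entries, coerces the amounts to floats (stripping a trailing '*' when present), and concatenates the non-zero entries into a string such as 'Al5.5Ni94.0W0.5'. A self-contained sketch of that transformation, using made-up sample data and no smlb dependencies, might look like:

```python
# Standalone sketch of the composition-string parsing described above.
# The sample entries are hypothetical; only the transformation is illustrated.
from typing import List


def parse_amount(x) -> float:
    """Coerce an amount to float, ignoring a trailing '*' (e.g. '2*' -> 2.0)."""
    s = str(x)
    if s.endswith("*"):
        s = s[:-1]
    return float(s)


def composition_to_string(raw_composition: List[dict]) -> str:
    parts = []
    for entry in raw_composition:
        name = entry["element"]
        amount = parse_amount(entry["idealWeightPercent"]["value"])
        if amount > 0:
            parts.append(f"{name}{amount}")
    return "".join(parts)


sample = [
    {"element": "Ni", "idealWeightPercent": {"value": 94.0}},
    {"element": "Al", "idealWeightPercent": {"value": "5.5"}},
    {"element": "W", "idealWeightPercent": {"value": "0.5*"}},  # asterisk stripped
    {"element": "Cu", "idealWeightPercent": {"value": 0.0}},    # zero -> omitted
]
print(composition_to_string(sample))  # -> Ni94.0Al5.5W0.5
```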
| 40.650964
| 99
| 0.629794
| 2,284
| 18,984
| 5.073555
| 0.171629
| 0.04263
| 0.014498
| 0.021747
| 0.346911
| 0.313687
| 0.292458
| 0.253193
| 0.189506
| 0.177339
| 0
| 0.011178
| 0.29783
| 18,984
| 466
| 100
| 40.738197
| 0.85814
| 0.372314
| 0
| 0.178862
| 0
| 0
| 0.115858
| 0.006523
| 0
| 0
| 0
| 0
| 0.020325
| 1
| 0.056911
| false
| 0
| 0.03252
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac43cedb06c0b3488172628809f67d3f8c8275d
| 2,520
|
py
|
Python
|
pytorch_lightning/accelerators/cpu_backend.py
|
ozen/pytorch-lightning
|
3b0b402d30fa19e0fef7d150c30ff4bb14a64230
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/accelerators/cpu_backend.py
|
ozen/pytorch-lightning
|
3b0b402d30fa19e0fef7d150c30ff4bb14a64230
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/accelerators/cpu_backend.py
|
ozen/pytorch-lightning
|
3b0b402d30fa19e0fef7d150c30ff4bb14a64230
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from pytorch_lightning.accelerators.base_backend import Accelerator
from pytorch_lightning.utilities import AMPType, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class CPUBackend(Accelerator):
def __init__(self, trainer, cluster_environment=None):
super().__init__(trainer, cluster_environment)
def setup(self, model):
# run through amp wrapper
if self.trainer.amp_backend:
raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
# call setup after the ddp process has connected
self.trainer.call_setup_hook(model)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.setup_optimizers(model)
self.trainer.model = model
def train(self):
model = self.trainer.model
# set up training routine
self.trainer.train_loop.setup_training(model)
# train or test
results = self.train_or_test()
return results
def training_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model.training_step(*args)
else:
output = self.trainer.model.training_step(*args)
return output
def validation_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model.validation_step(*args)
else:
output = self.trainer.model.validation_step(*args)
return output
def test_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model.test_step(*args)
else:
output = self.trainer.model.test_step(*args)
return output
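The three step methods above share one pattern: when the trainer's AMP backend is the native one, the model call is wrapped in `torch.cuda.amp.autocast()`; otherwise it runs unwrapped. A small, framework-agnostic sketch of that dispatch (using `contextlib.nullcontext` for the no-AMP branch; `use_native_amp` is a hypothetical flag standing in for the `AMPType.NATIVE` check) could be written once instead of three times:

```python
# Sketch of the "wrap in autocast only when native AMP is active" pattern.
# `use_native_amp` is a hypothetical flag; torch is imported only when needed.
import contextlib


def step_context(use_native_amp: bool):
    if use_native_amp:
        import torch  # only required when AMP is requested
        return torch.cuda.amp.autocast()
    return contextlib.nullcontext()


def run_step(step_fn, args, use_native_amp: bool = False):
    with step_context(use_native_amp):
        return step_fn(*args)


if __name__ == "__main__":
    # CPU-only demo: no AMP, so the nullcontext branch is taken.
    print(run_step(lambda x, y: x + y, (1, 2)))  # -> 3
```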
| 34.520548
| 99
| 0.681746
| 317
| 2,520
| 5.305994
| 0.413249
| 0.098098
| 0.0761
| 0.078478
| 0.300238
| 0.268728
| 0.268728
| 0.167658
| 0.167658
| 0.167658
| 0
| 0.00209
| 0.240476
| 2,520
| 72
| 100
| 35
| 0.876698
| 0.284524
| 0
| 0.461538
| 0
| 0
| 0.029164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.102564
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac4e4fc48c67f3dafab5b728a225aa95eec15e2
| 7,668
|
py
|
Python
|
st2common/st2common/util/pack.py
|
timgates42/st2
|
0e8ae756f30ffe2e017c64bff67830abdee7f7c9
|
[
"Apache-2.0"
] | null | null | null |
st2common/st2common/util/pack.py
|
timgates42/st2
|
0e8ae756f30ffe2e017c64bff67830abdee7f7c9
|
[
"Apache-2.0"
] | 15
|
2021-02-11T22:58:54.000Z
|
2021-08-06T18:03:47.000Z
|
st2common/st2common/util/pack.py
|
timgates42/st2
|
0e8ae756f30ffe2e017c64bff67830abdee7f7c9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import re
import collections
import six
from st2common.util import schema as util_schema
from st2common.constants.pack import MANIFEST_FILE_NAME
from st2common.constants.pack import PACK_REF_WHITELIST_REGEX
from st2common.content.loader import MetaLoader
from st2common.persistence.pack import Pack
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.util import jinja as jinja_utils
__all__ = [
'get_pack_ref_from_metadata',
'get_pack_metadata',
'get_pack_warnings',
'get_pack_common_libs_path_for_pack_ref',
'get_pack_common_libs_path_for_pack_db',
'validate_config_against_schema',
'normalize_pack_version'
]
# Common format for python 2.7 warning
if six.PY2:
PACK_PYTHON2_WARNING = "DEPRECATION WARNING: Pack %s only supports Python 2.x. " \
"Python 2 support will be dropped in future releases. " \
"Please consider updating your packs to work with Python 3.x"
else:
PACK_PYTHON2_WARNING = "DEPRECATION WARNING: Pack %s only supports Python 2.x. " \
"Python 2 support has been removed since st2 v3.4.0. " \
"Please update your packs to work with Python 3.x"
def get_pack_ref_from_metadata(metadata, pack_directory_name=None):
"""
Utility function which retrieves pack "ref" attribute from the pack metadata file.
If this attribute is not provided, an attempt is made to infer "ref" from the "name" attribute.
:rtype: ``str``
"""
pack_ref = None
# The rules for the pack ref are as follows:
# 1. If the ref attribute is available, we use that
# 2. If pack_directory_name is available, we use that (this only applies to packs
# which are in sub-directories)
# 3. If the ref attribute is not available but the pack name is, and the name meets the valid
# name criteria, we use that
if metadata.get('ref', None):
pack_ref = metadata['ref']
elif pack_directory_name and re.match(PACK_REF_WHITELIST_REGEX, pack_directory_name):
pack_ref = pack_directory_name
else:
if re.match(PACK_REF_WHITELIST_REGEX, metadata['name']):
pack_ref = metadata['name']
else:
msg = ('Pack name "%s" contains invalid characters and "ref" attribute is not '
'available. You either need to add "ref" attribute which contains only word '
'characters to the pack metadata file or update the name attribute to contain '
'only word characters.')
raise ValueError(msg % (metadata['name']))
return pack_ref
def get_pack_metadata(pack_dir):
"""
Return parsed metadata for a particular pack directory.
:rtype: ``dict``
"""
manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)
if not os.path.isfile(manifest_path):
raise ValueError('Pack "%s" is missing %s file' % (pack_dir, MANIFEST_FILE_NAME))
meta_loader = MetaLoader()
content = meta_loader.load(manifest_path)
if not content:
raise ValueError('Pack "%s" metadata file is empty' % (pack_dir))
return content
def get_pack_warnings(pack_metadata):
"""
Return warning string if pack metadata indicates only python 2 is supported
:rtype: ``str``
"""
warning = None
versions = pack_metadata.get('python_versions', None)
pack_name = pack_metadata.get('name', None)
if versions and set(versions) == set(['2']):
warning = PACK_PYTHON2_WARNING % pack_name
return warning
def validate_config_against_schema(config_schema, config_object, config_path,
pack_name=None):
"""
Validate provided config dictionary against the provided config schema
dictionary.
"""
# NOTE: Lazy import to avoid the performance overhead of importing this module when it's not used
import jsonschema
pack_name = pack_name or 'unknown'
schema = util_schema.get_schema_for_resource_parameters(parameters_schema=config_schema,
allow_additional_properties=True)
instance = config_object
try:
cleaned = util_schema.validate(instance=instance, schema=schema,
cls=util_schema.CustomValidator, use_default=True,
allow_default_none=True)
for key in cleaned:
if (jinja_utils.is_jinja_expression(value=cleaned.get(key)) and
"decrypt_kv" in cleaned.get(key) and config_schema.get(key).get('secret')):
raise ValueValidationException('Values specified as "secret: True" in config '
'schema are automatically decrypted by default. Use '
'of "decrypt_kv" jinja filter is not allowed for '
'such values. Please check the specified values in '
'the config or the default values in the schema.')
except jsonschema.ValidationError as e:
attribute = getattr(e, 'path', [])
if isinstance(attribute, (tuple, list, collections.Iterable)):
attribute = [str(item) for item in attribute]
attribute = '.'.join(attribute)
else:
attribute = str(attribute)
msg = ('Failed validating attribute "%s" in config for pack "%s" (%s): %s' %
(attribute, pack_name, config_path, six.text_type(e)))
raise jsonschema.ValidationError(msg)
return cleaned
def get_pack_common_libs_path_for_pack_ref(pack_ref):
pack_db = Pack.get_by_ref(pack_ref)
pack_common_libs_path = get_pack_common_libs_path_for_pack_db(pack_db=pack_db)
return pack_common_libs_path
def get_pack_common_libs_path_for_pack_db(pack_db):
"""
Return the pack's common lib path. This is the path where common code for sensors
and actions are placed.
For example, if the pack is at /opt/stackstorm/packs/my_pack, you can place
common library code for actions and sensors in /opt/stackstorm/packs/my_pack/lib/.
This common library code is only available for python sensors and actions. The lib
structure also needs to follow a python convention with a __init__.py file.
:param pack_db: Pack DB model
:type pack_db: :class:`PackDB`
:rtype: ``str``
"""
pack_dir = getattr(pack_db, 'path', None)
if not pack_dir:
return None
libs_path = os.path.join(pack_dir, 'lib')
return libs_path
def normalize_pack_version(version):
"""
Normalize an old, pre-StackStorm v2.1 version string that is not valid semver (e.g. 0.2)
into a valid semver version string (0.2.0).
:rtype: ``str``
"""
version = str(version)
version_separator_count = version.count('.')
if version_separator_count == 1:
version = version + '.0'
return version
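`get_pack_ref_from_metadata` resolves a pack reference in priority order: an explicit 'ref' field, then a valid directory name, then a valid 'name' field, and raises otherwise. A minimal sketch of the same ordering follows; the whitelist pattern used here is an assumption, a stand-in for the real `PACK_REF_WHITELIST_REGEX` from st2common.constants.pack:

```python
# Sketch of the pack-ref resolution order described above.
# REF_PATTERN is a hypothetical stand-in for st2's PACK_REF_WHITELIST_REGEX.
import re

REF_PATTERN = re.compile(r"^[a-z0-9_]+$")


def resolve_pack_ref(metadata: dict, pack_directory_name: str = None) -> str:
    if metadata.get("ref"):
        return metadata["ref"]
    if pack_directory_name and REF_PATTERN.match(pack_directory_name):
        return pack_directory_name
    if REF_PATTERN.match(metadata.get("name", "")):
        return metadata["name"]
    raise ValueError(
        'Pack name "%s" contains invalid characters and no "ref" attribute '
        "is available." % metadata.get("name")
    )


print(resolve_pack_ref({"name": "my_pack"}))                  # -> my_pack
print(resolve_pack_ref({"name": "My Pack!"}, "my_pack_dir"))  # -> my_pack_dir
```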
| 36.514286
| 100
| 0.666275
| 1,013
| 7,668
| 4.860809
| 0.27542
| 0.022746
| 0.019903
| 0.025589
| 0.139115
| 0.095857
| 0.075955
| 0.075955
| 0.045085
| 0.045085
| 0
| 0.008607
| 0.257564
| 7,668
| 209
| 101
| 36.688995
| 0.856315
| 0.276865
| 0
| 0.056075
| 0
| 0
| 0.221292
| 0.028476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065421
| false
| 0
| 0.121495
| 0
| 0.261682
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac65f8d4a911234497385069b667c9dd2f68934
| 21,364
|
py
|
Python
|
pixelproject/grid.py
|
MickaelRigault/pixelproject
|
d98db99a8e69eafa7a979c02a099e4c07f5fd568
|
[
"Apache-2.0"
] | null | null | null |
pixelproject/grid.py
|
MickaelRigault/pixelproject
|
d98db99a8e69eafa7a979c02a099e4c07f5fd568
|
[
"Apache-2.0"
] | null | null | null |
pixelproject/grid.py
|
MickaelRigault/pixelproject
|
d98db99a8e69eafa7a979c02a099e4c07f5fd568
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
#
import warnings
import numpy as np
UNIT_SQUARE = np.asarray([[0,0],[0,1],[1,1],[1,0]])-0.5
from propobject import BaseObject
from shapely import geometry
import pandas
import geopandas
# ======================= #
# #
# Functions #
# #
# ======================= #
def get_simple_grid(xbounds, ybounds, shift_origin=None):
""" """
xbounds = np.atleast_1d(xbounds)
if len(xbounds)==1:
xmin,xmax = 0,xbounds[0]
else:
xmin,xmax = xbounds
ybounds = np.atleast_1d(ybounds)
if len(ybounds)==1:
ymin,ymax = 0,ybounds[0]
else:
ymin,ymax = ybounds
pixels = np.mgrid[xmin:xmax,ymin:ymax]
pixels2_flat = np.concatenate(pixels.T, axis=0)
if shift_origin is not None:
# not +=: avoid the in-place add conflict between int and float arrays
pixels2_flat = pixels2_flat+ shift_origin
return Grid(pixels2_flat, UNIT_SQUARE)
# ======================= #
# #
# Classes #
# #
# ======================= #
class GridProjector( BaseObject ):
""" """
PROPERTIES = ["gridin", "gridout"]
DERIVED_PROPERTIES = ["gridinterest"]
def __init__(self, grid_in=None, grid_out=None):
""" """
if grid_in is not None:
self.set_grid(grid_in, "in")
if grid_out is not None:
self.set_grid(grid_out, "out")
# =================== #
# Methods #
# =================== #
# --------- #
# SETTER #
# --------- #
def set_grid(self, grid, which="in"):
""" """
if which not in ["in","out"]:
raise ValueError("`which` should either be 'in' or 'out'")
self._properties["grid%s"%which] = grid
self._derived_properties["gridinterest"] = None
def _measure_gridinterest_(self):
""" """
# -- internal -- #
def localdef_get_area(l):
return l.geometry.area/self.gridin.geodataframe.iloc[l.id_1].geometry.area
# -------------- #
if self.gridin is not None and self.gridout is not None:
#
# Most likely there is a faster method if is_shape_unique
#
self._derived_properties["gridinterest"] = geopandas.overlay(self.gridin.geodataframe,
self.gridout.geodataframe,
how='intersection')
self.gridinterest["area"] = self.gridinterest.apply(localdef_get_area, axis=1)
else:
warnings.warn("Cannot measure gridinterest, because gridin and/or gridout is/are None")
# -------------- #
# Measurement #
# -------------- #
def project_data(self, data, as_serie=True, use="sum"):
""" Use gridinteresect
Parameters
----------
data: [ndarray or string or pandas.Serie]
data associated to gridin that should be projected in gridout.
could be:
- ndarray: must have the same length as gridin
- string: name of a gridin column (pandas)
- pandas.Serie: serie that will be matched with gridin
"""
# The calculation itself
projected_data = self._project_data_(self._parse_data_(data), use=use)
if as_serie:
return projected_data
projected_data_array = np.zeros( len(self.gridout.geodataframe) )
projected_data_array[projected_data.index.values] = projected_data.values
return projected_data_array
def _project_data_(self, data, use="sum"):
""" """
self.gridinterest["_tmp"] = data[ self.gridin.geodataframe.loc[ self.gridinterest["id_1"]].index
] * self.gridinterest["area"]
return getattr(self.gridinterest.groupby("id_2")["_tmp"],use)()
def _parse_data_(self,data):
"""
Parameters
----------
data: [ndarray or string or pandas.Serie]
data associated to gridin that should be projected in gridout.
could be:
- ndarray: must have the same length as gridin
- string: name of a gridin column (pandas)
- pandas.Serie: serie that will be matched with gridin
Returns
-------
ndarray
"""
if type(data) == str:
if data not in self.gridin.geodataframe.columns:
raise ValueError("Unknown gridin column '%s'"%data)
return self.gridin.geodataframe[data].values
elif type(data) == pandas.Series:
return data.values
elif len(data) != len(self.gridin.geodataframe):
raise ValueError("data given as ndarray but lengthes do not match")
return data
# =================== #
# Properties #
# =================== #
@property
def gridin(self):
""" """
return self._properties["gridin"]
@property
def gridout(self):
""" """
return self._properties["gridout"]
@property
def gridinterest(self):
""" """
if self._derived_properties["gridinterest"] is None:
self._measure_gridinterest_()
return self._derived_properties["gridinterest"]
class Grid( BaseObject ):
PROPERTIES = ["pixels", "shape"]
SIDE_PROPERTIES = ["indexes"]
DERIVED_PROPERTIES = ["vertices","geodataframe", "triangulation"]
def __init__(self, pixels=None, shape=UNIT_SQUARE, indexes=None):
""" """
if pixels is not None:
self.set_pixels(pixels,shape=shape)
if indexes is not None:
self.set_indexes(indexes)
# =================== #
# Methods #
# =================== #
@classmethod
def from_stamps(cls, stamp, origin=[0,0]):
""" stamps are 2d arrays, something you could pass to ax.imshow(stamp)
data will be stored as 'data' in the grid's dataframe
"""
this = get_simple_grid(*np.shape(stamp), shift_origin=origin)
this.add_data(np.ravel(stamp), "data")
return this
@classmethod
def from_vertices(cls, vertices, indexes=None):
""" directly provide the vertices
Parameters:
-----------
vertices: [list of array or dictionary]
The vertices of all the grid entries.
Could have two format:
- list of array: [[vert_1],[vert_2],....], then you may want to provide indexes
- dictionary: {id_1:vert_1,id_2: vert_2, ...}
if a dictionary is provided, the indexes will be set by the vertices.
indexes: [list or None] -optional-
(Ignored if vertices is a dict)
If you provide vertices as a list of vertices, you can provide the
indexes of each of the vertices.
-> if None, then indexes = np.arange(len(vertices))
Returns
-------
Grid
"""
this = cls()
if type(vertices) is dict:
indexes, vertices = list(vertices.keys()), list(vertices.values())
this.set_vertices(vertices)
if indexes is not None:
this.set_indexes(indexes)
return this
@classmethod
def set_from(cls, datainput):
""" Creates a new Grid objects from the given input data:
Parameters
----------
datainput: [geopandas.geodataframe.GeoDataFrame or ndarray]
this could either be a:
- geodataframe (and this calls self.set_geodataframe)
- geoSeries
- ndarray: if 3-shaped, this calls set_vertices ;
if 2-shaped, this calls set_pixels.
Returns
-------
Grid
"""
this = cls()
if type(datainput) == geopandas.geodataframe.GeoDataFrame:
this.set_geodataframe(datainput)
return this
if type(datainput) == np.ndarray:
if len(np.shape( datainput) ) == 3: # vertices
this.set_vertices(datainput)
elif len(np.shape( datainput) ) == 2: # pixels
this.set_pixels(datainput)
else:
raise TypeError("cannot parse the shape of the given datainput")
return this
raise TypeError("cannot parse the format of the given input")
# --------- #
# SETTER #
# --------- #
def set_indexes(self, indexes, update=True):
""" provide the indexes associated to each pixels
Parameters
----------
indexes: [ndarray]
indexes associated to the pixels.
This should have a length equal to the number of pixels (if any).
update: [bool] -optional-
should the geodataframe be updated ?
[use True if you are not sure]
Returns
-------
Void
"""
if self.pixels is not None and len(indexes) != self.npixels:
raise AssertionError("not the same number of indexes as the number of pixels")
self._side_properties["indexes"] = indexes
if update:
self._update_geodataframe_()
def set_pixels(self, pixels, shape=None, update=True):
""" provide the pixels.
Pixels define the position up on which the geometries are defined.
NB: vertices = pixels+shape
"""
# Setting the pixels
if np.shape(pixels)[-1] != 2:
raise ValueError("pixels must be [N,2] arrays")
self._properties["pixels"] = np.asarray(pixels)
if shape is not None:
self.set_pixelshapes(shape, update=False)
if update:
self._update_geodataframe_()
def set_pixelshapes(self, shape, update=True):
""" """
# Setting the pixel shape.s
if len(np.shape(shape))==2:
self._properties["shape"] = np.asarray(shape)
elif len(np.shape(shape))==3:
if self.pixels is not None and np.shape(shape)[0] != self.npixels:
raise AssertionError("`shape` must be unique or have the same length as pixels")
self._properties["shape"] = np.asarray(shape)
else:
raise ValueError("Cannot parse the given shape, must be [M,2] or [N,M,2] where N is the number of pixels and M the number of vertices")
if update:
self._update_geodataframe_()
def set_vertices(self, vertices, overwrite=False, **kwargs):
""" """
if not overwrite and (self.pixels is not None and self.shape is not None):
raise ValueError("Pixels and shape already defined. Set overwrite=True to update the vertices.")
try:
pixels = np.mean(vertices, axis=1)
except:
# Means vertices have different size.
self._derived_properties["vertices"] = vertices
pixels = np.asarray([np.mean(v_, axis=0) for v_ in vertices])
self.set_pixels(pixels, None, **kwargs)
return
self._derived_properties["vertices"] = np.asarray(vertices)
shape = self.vertices - pixels[:,None]
shape_unique = np.unique(shape, axis=0)
if len(shape_unique)==1:
shape = shape_unique[0]
self.set_pixels(pixels, shape, **kwargs)
def set_geodataframe(self, geodataframe, overwrite=False):
""" """
if not overwrite and (self.pixels is not None and self.shape is not None):
raise ValueError("Pixels and shape already defined. Set overwrite=True to update the geodataframe.")
if "geometry" not in geodataframe.columns:
raise TypeError("The given geodataframe does not have 'geometry' column. It is required")
self._derived_properties["geodataframe"] = geodataframe
if "id" not in geodataframe.columns:
self.geodataframe["id"] = self.indexes if self.pixels is not None else np.arange( len(geodataframe) )
# - get the vertices:
def get_verts(poly_):
return np.stack(poly_.exterior.xy).T[:-1]
vertices = geodataframe["geometry"].apply(get_verts).values
self.set_vertices(vertices, update=False) # don't update the geodataframe
# --------- #
# UPDATE #
# --------- #
def _update_geodataframe_(self):
""" """
dataseries = self.get_geoseries()
x,y = self.pixels.T
self._derived_properties["geodataframe"] = \
geopandas.GeoDataFrame({'geometry': dataseries,
'id':self.indexes,
'x':x,'y':y})
def add_data(self, data, name, indexes=None, inplace=True):
""" """
if indexes is None:
indexes = self.indexes
s_ = pandas.Series(data, name=name, index=indexes)
if not inplace:
return self.geodataframe.join(s_)
self._derived_properties["geodataframe"] = self.geodataframe.join(s_)
# --------- #
# GETTER #
# --------- #
def get_geoseries(self):
""" build a new geodataframe and returns it. """
import geopandas
return geopandas.GeoSeries([geometry.Polygon(v) for v in self.vertices])
def get_triangulation_grid(self):
""" Returns a grid of triangulation. """
return Grid.set_from( np.concatenate(self.triangulation, axis=0) )
def get_pixels_in(self, polygon, invert=False):
""" checks if the centroid of the pixel is in or out the given shapely polygon.
Parameters
----------
polygon: [shapely.geometry]
reference polygon
invert: [bool] -optional-
Get the pixels inside the polygon [invert=False] or outside it [invert=True]
Returns
-------
list of pixels and boolean mask
"""
from shapely import vectorized
flagin = vectorized.contains(polygon, *self.pixels.T)
if invert:
flagin = ~flagin
return self.pixels[flagin], flagin
# --------- #
# Project #
# --------- #
def project_to(self, othergrid, column="*", asgrid=True, use="sum"):
""" project data in the given grid
Parameters
----------
othergrid: [Grid]
New grid where data should be projected to
column: [str/None/list of] -optional-
Which data should be projected?
If None or '*', all the non-structural columns will be projected
(structural columns are 'geometry', 'id', 'x', 'y')
asgrid: [bool] -optional-
Should this return a new Grid (actually same object as othergrid)
or a dict [asgrid=False]?
Returns
-------
Grid or dict (see asgrid)
"""
gproj = GridProjector(self, othergrid)
if column is None or column in ["*","all"]:
column = [k for k in self.geodataframe if k not in ['geometry', 'id', 'x', 'y']]
datas = {k:gproj.project_data(k, use=use) for k in column}
if not asgrid:
return datas
# building and setting the new grid
gout = othergrid.__class__.set_from(othergrid.geodataframe)
for k in column:
gout.add_data(datas[k],k)
return gout
def project_to_wcs(self, wcs_, asgrid=True, **kwargs):
""" provide an astropy.wcs.WCS and this will project
the current grid into it (assuming the grid's vertex coordinates are in pixels)
Parameters
----------
wcs_: [astropy.wcs.WCS]
The world coordinate solution
asgrid: [bool] -optional-
Should this return a loaded Grid object or an array of vertices (in degrees)?
**kwargs goes to wcs_.all_pix2world
Returns
-------
Grid or array (see asgrid)
"""
verts = self.vertices
verts_shape = np.shape(verts)
flatten_verts = np.concatenate(verts, axis=0)
#
flatten_verts_wcs = np.asarray(wcs_.all_pix2world(flatten_verts[:,0],
flatten_verts[:,1], 0,
**kwargs)).T
#
verts_wcs = flatten_verts_wcs.reshape(verts_shape)
if not asgrid:
return verts_wcs
g_wcs = Grid.set_from(verts_wcs)
g_wcs.geodataframe["x_pix"],g_wcs.geodataframe["y_pix"] = self.pixels.T
return g_wcs
def evaluate(self, func, vectorized=True):
""" Evaluate the given function throughout the grid.
This evaluation uses polygon triangulation to integrate the
given function inside each polygon via triangle integration.
-> dependency: the integration is made using quadpy.
Examples:
# Remark the np.stack(x, axis=-1).
# This is mandatory since integration is going to send
# x = [ [[....],[...]], [[....],[...]], ... ] for triangles
```python
def get_2dgauss(x, mu=[4,4], cov=[[1,0],[0,2]]):
""" """
return stats.multivariate_normal.pdf(np.stack(x, axis=-1), mean=mu, cov=cov)
```
"""
try:
import quadpy
except ImportError:
raise ImportError("Integration is made using quadpy. pip install quadpy")
# Is Triangulation made ?
if self._derived_properties["triangulation"] is None:
warnings.warn("triangles not defined: deriving triangulation.")
self.derive_triangulation()
# Let's get the triangles
trs = np.stack(self.triangulation)
shape_trs = np.shape(trs)
if len(shape_trs)==4 and vectorized: # All Polygon have the same topology (same amount of vertices)
tr_flat = np.stack(np.concatenate(trs, axis=0), axis=-2)
val = quadpy.triangle.strang_fix_cowper_09().integrate(func,tr_flat).reshape(shape_trs[:2])
else:
val = np.asarray([quadpy.triangle.strang_fix_cowper_09().integrate(func,np.stack(t_, axis=-2)) for t_ in trs])
return np.sum(val, axis=1)
def derive_triangulation(self, fast_unique=True):
""" """
def triangulate(geom):
""" Return triangles in the format that quadpy expects """
from shapely import ops
triangles = ops.triangulate(geom)
return np.stack([np.asarray(t.exterior.coords.xy).T[:-1] for t in triangles])
if not self.is_shape_unique or not fast_unique:
self._derived_properties["triangulation"] = self.geodataframe["geometry"].apply(triangulate)
else:
self._derived_properties["triangulation"] = self.pixels[:,None,None] + triangulate(geometry.Polygon(self.shape))
# --------- #
# PLOTTER #
# --------- #
def show(self, column=None, ax=None, edgecolor="0.7", facecolor="None", **kwargs):
""" """
if column is not None:
facecolor=None
return self.geodataframe.plot(column, ax=ax,facecolor=facecolor,
edgecolor=edgecolor, **kwargs)
# =================== #
# Properties #
# =================== #
@property
def pixels(self):
""" """
return self._properties["pixels"]
@property
def npixels(self):
""" """
return len(self.pixels)
@property
def shape(self):
""" """
if self._properties["shape"] is None:
self._properties["shape"] = UNIT_SQUARE
return self._properties["shape"]
# -- Side
@property
def indexes(self):
""" """
if self._side_properties["indexes"] is None:
self._side_properties["indexes"] = np.arange(self.npixels)
return self._side_properties["indexes"]
# -- Derived
@property
def vertices(self):
""" """
if self._derived_properties["vertices"] is None and (self.pixels is not None and self.shape is not None):
self._derived_properties["vertices"] = self.pixels[:,None]+self.shape
return self._derived_properties["vertices"]
@property
def is_shape_unique(self):
""" """
return len(np.shape(self.shape))==2
@property
def geodataframe(self):
""" """
if self._derived_properties["geodataframe"] is None:
self._update_geodataframe_()
return self._derived_properties["geodataframe"]
@property
def triangulation(self):
""" Triangulation of the vertices. Based on Delaunay tessellation, see shapely.ops.triangulate """
if self._derived_properties["triangulation"] is None:
self.derive_triangulation()
return self._derived_properties["triangulation"]
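The Grid above stores pixel centers plus a (usually shared) shape, and derives the polygon vertices as `pixels[:, None] + shape`; `get_simple_grid` builds an integer mesh of such unit squares. A NumPy-only sketch of that construction, without the geopandas layer, might look like this:

```python
# Numpy-only sketch of the "vertices = pixel centers + unit-square offsets"
# construction used by get_simple_grid/Grid above (no geopandas required).
import numpy as np

UNIT_SQUARE = np.asarray([[0, 0], [0, 1], [1, 1], [1, 0]]) - 0.5


def simple_grid_vertices(nx: int, ny: int) -> np.ndarray:
    """Return an (nx*ny, 4, 2) array of square vertices centred on integer pixels."""
    pixels = np.mgrid[0:nx, 0:ny]                 # shape (2, nx, ny)
    centers = np.concatenate(pixels.T, axis=0)    # shape (nx*ny, 2)
    return centers[:, None, :] + UNIT_SQUARE      # broadcast to (nx*ny, 4, 2)


verts = simple_grid_vertices(3, 2)
print(verts.shape)   # (6, 4, 2)
print(verts[0])      # vertices of the square centred on pixel (0, 0)
```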
| 35.08046
| 145
| 0.550178
| 2,326
| 21,364
| 4.94196
| 0.155632
| 0.031057
| 0.014876
| 0.009134
| 0.165811
| 0.114485
| 0.10274
| 0.071596
| 0.063941
| 0.063941
| 0
| 0.005833
| 0.325969
| 21,364
| 608
| 146
| 35.138158
| 0.792431
| 0.252294
| 0
| 0.181818
| 0
| 0.006993
| 0.100605
| 0.002789
| 0
| 0
| 0
| 0
| 0.006993
| 1
| 0.13986
| false
| 0
| 0.041958
| 0.006993
| 0.335664
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac66c22ad3d5b81a13742eecef45d93fd664ee6
| 31,445
|
py
|
Python
|
numpy/lib/format.py
|
AnirudhDagar/numpy
|
77bc3225e6f4badf83190ec300a0e10e56949644
|
[
"BSD-3-Clause"
] | 5
|
2021-08-23T06:23:15.000Z
|
2022-02-05T07:27:30.000Z
|
numpy/lib/format.py
|
AnirudhDagar/numpy
|
77bc3225e6f4badf83190ec300a0e10e56949644
|
[
"BSD-3-Clause"
] | 75
|
2021-07-12T01:28:50.000Z
|
2022-03-28T20:09:00.000Z
|
numpy/lib/format.py
|
AnirudhDagar/numpy
|
77bc3225e6f4badf83190ec300a0e10e56949644
|
[
"BSD-3-Clause"
] | 1
|
2019-11-05T15:23:08.000Z
|
2019-11-05T15:23:08.000Z
|
"""
Binary serialization
NPY format
==========
A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
persisting a *single* arbitrary NumPy array on disk. The format stores all
of the shape and dtype information necessary to reconstruct the array
correctly even on another machine with a different architecture.
The format is designed to be as simple as possible while achieving
its limited goals.
The ``.npz`` format is the standard format for persisting *multiple* NumPy
arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
files, one for each array.
Capabilities
------------
- Can represent all NumPy arrays including nested record arrays and
object arrays.
- Represents the data in its native binary form.
- Supports Fortran-contiguous arrays directly.
- Stores all of the necessary information to reconstruct the array
including shape and dtype on a machine of a different
architecture. Both little-endian and big-endian arrays are
supported, and a file with little-endian numbers will yield
a little-endian array on any machine reading the file. The
types are described in terms of their actual sizes. For example,
if a machine with a 64-bit C "long int" writes out an array with
"long ints", a reading machine with 32-bit C "long ints" will yield
an array with 64-bit integers.
- Is straightforward to reverse engineer. Datasets often live longer than
the programs that created them. A competent developer should be
able to create a solution in their preferred programming language to
read most ``.npy`` files that they have been given without much
documentation.
- Allows memory-mapping of the data. See `open_memmap`.
- Can be read from a filelike stream object instead of an actual file.
- Stores object arrays, i.e. arrays containing elements that are arbitrary
Python objects. Files with object arrays cannot be memory-mapped, but
can be read and written to disk.
Limitations
-----------
- Arbitrary subclasses of numpy.ndarray are not completely preserved.
Subclasses will be accepted for writing, but only the array data will
be written out. A regular numpy.ndarray object will be created
upon reading the file.
.. warning::
Due to limitations in the interpretation of structured dtypes, dtypes
with fields with empty names will have the names replaced by 'f0', 'f1',
etc. Such arrays will not round-trip through the format entirely
accurately. The data is intact; only the field names will differ. We are
working on a fix for this. This fix will not require a change in the
file format. The arrays with such structures can still be saved and
restored, and the correct dtype may be restored by using the
``loadedarray.view(correct_dtype)`` method.
File extensions
---------------
We recommend using the ``.npy`` and ``.npz`` extensions for files saved
in this format. This is by no means a requirement; applications may wish
to use these file formats but use an extension specific to the
application. In the absence of an obvious alternative, however,
we suggest using ``.npy`` and ``.npz``.
Version numbering
-----------------
The version numbering of these formats is independent of NumPy version
numbering. If the format is upgraded, the code in `numpy.io` will still
be able to read and write Version 1.0 files.
Format Version 1.0
------------------
The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
The next 1 byte is an unsigned byte: the major version number of the file
format, e.g. ``\\x01``.
The next 1 byte is an unsigned byte: the minor version number of the file
format, e.g. ``\\x00``. Note: the version of the file format is not tied
to the version of the numpy package.
The next 2 bytes form a little-endian unsigned short int: the length of
the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
spaces (``\\x20``) to make the total of
``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
by 64 for alignment purposes.
The dictionary contains three keys:
"descr" : dtype.descr
An object that can be passed as an argument to the `numpy.dtype`
constructor to create the array's dtype.
"fortran_order" : bool
Whether the array data is Fortran-contiguous or not. Since
Fortran-contiguous arrays are a common form of non-C-contiguity,
we allow them to be written directly to disk for efficiency.
"shape" : tuple of int
The shape of the array.
For repeatability and readability, the dictionary keys are sorted in
alphabetic order. This is for convenience only. A writer SHOULD implement
this if possible. A reader MUST NOT depend on this.
Following the header comes the array data. If the dtype contains Python
objects (i.e. ``dtype.hasobject is True``), then the data is a Python
pickle of the array. Otherwise the data is the contiguous (either C-
or Fortran-, depending on ``fortran_order``) bytes of the array.
Consumers can figure out the number of bytes by multiplying the number
of elements given by the shape (noting that ``shape=()`` means there is
1 element) by ``dtype.itemsize``.
Format Version 2.0
------------------
The version 1.0 format only allowed the array header to have a total size of
65535 bytes. This can be exceeded by structured arrays with a large number of
columns. The version 2.0 format extends the header size to 4 GiB.
`numpy.save` will automatically save in 2.0 format if the data requires it,
else it will always use the more compatible 1.0 format.
The description of the fourth element of the header therefore has become:
"The next 4 bytes form a little-endian unsigned int: the length of the header
data HEADER_LEN."
Format Version 3.0
------------------
This version replaces the ASCII string (which in practice was latin1) with
a utf8-encoded string, so supports structured types with any unicode field
names.
Notes
-----
The ``.npy`` format, including motivation for creating it and a comparison of
alternatives, is described in the
:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
evolved with time and this document is more current.
"""
import numpy
import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import (
isfileobj, os_fspath, pickle
)
__all__ = []
EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
_header_size_info = {
(1, 0): ('<H', 'latin1'),
(2, 0): ('<I', 'latin1'),
(3, 0): ('<I', 'utf8'),
}
def _check_version(version):
if version not in [(1, 0), (2, 0), (3, 0), None]:
msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
raise ValueError(msg % (version,))
def magic(major, minor):
""" Return the magic string for the given file format version.
Parameters
----------
major : int in [0, 255]
minor : int in [0, 255]
Returns
-------
magic : str
Raises
------
ValueError if the version cannot be formatted.
"""
if major < 0 or major > 255:
raise ValueError("major version must be 0 <= major < 256")
if minor < 0 or minor > 255:
raise ValueError("minor version must be 0 <= minor < 256")
return MAGIC_PREFIX + bytes([major, minor])
def read_magic(fp):
""" Read the magic string to get the version of the file format.
Parameters
----------
fp : filelike object
Returns
-------
major : int
minor : int
"""
magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
if magic_str[:-2] != MAGIC_PREFIX:
msg = "the magic string is not correct; expected %r, got %r"
raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
major, minor = magic_str[-2:]
return major, minor
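As a hedged, self-contained illustration of the header layout this module documents (magic string, two version bytes, a little-endian header length, then the padded header dictionary), the sketch below saves a small array with `np.save` and decodes those fields by hand. It assumes the file comes out in format 1.0, which `np.save` uses whenever the header fits within the 1.0 size limit, and it uses `ast.literal_eval` in place of the module's own header parsing:

```python
# Hedged sketch: save a tiny array and decode its 1.0 header fields manually.
import ast
import os
import struct
import tempfile
import numpy as np

with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as tmp:
    np.save(tmp, np.arange(6, dtype="<i4").reshape(2, 3))
    path = tmp.name

with open(path, "rb") as fp:
    assert fp.read(6) == b"\x93NUMPY"                 # magic string
    major, minor = fp.read(1)[0], fp.read(1)[0]       # version bytes, e.g. 1, 0
    (header_len,) = struct.unpack("<H", fp.read(2))   # little-endian unsigned short
    header = ast.literal_eval(fp.read(header_len).decode("latin1"))

os.remove(path)
print(major, minor, header)
# e.g. 1 0 {'descr': '<i4', 'fortran_order': False, 'shape': (2, 3)}
```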
def _has_metadata(dt):
if dt.metadata is not None:
return True
elif dt.names is not None:
return any(_has_metadata(dt[k]) for k in dt.names)
elif dt.subdtype is not None:
return _has_metadata(dt.base)
else:
return False
def dtype_to_descr(dtype):
"""
Get a serializable descriptor from the dtype.
The .descr attribute of a dtype object cannot be round-tripped through
the dtype() constructor. Simple types, like dtype('float32'), have
a descr which looks like a record array with one field with '' as
a name. The dtype() constructor interprets this as a request to give
a default name. Instead, we construct descriptor that can be passed to
dtype().
Parameters
----------
dtype : dtype
The dtype of the array that will be written to disk.
Returns
-------
descr : object
An object that can be passed to `numpy.dtype()` in order to
replicate the input dtype.
"""
if _has_metadata(dtype):
warnings.warn("metadata on a dtype may be saved or ignored, but will "
"raise if saved when read. Use another form of storage.",
UserWarning, stacklevel=2)
if dtype.names is not None:
# This is a record array. The .descr is fine. XXX: parts of the
# record array with an empty name, like padding bytes, still get
# fiddled with. This needs to be fixed in the C implementation of
# dtype().
return dtype.descr
else:
return dtype.str
def descr_to_dtype(descr):
"""
Returns a dtype based off the given description.
This is essentially the reverse of `dtype_to_descr()`. It will remove
the valueless padding fields (blank-named void entries) from the description,
and then convert the description to its corresponding
dtype.
Parameters
----------
descr : object
The object retrieved by dtype.descr. Can be passed to
`numpy.dtype()` in order to replicate the input dtype.
Returns
-------
dtype : dtype
The dtype constructed by the description.
"""
if isinstance(descr, str):
# No padding removal needed
return numpy.dtype(descr)
elif isinstance(descr, tuple):
# subtype, will always have a shape descr[1]
dt = descr_to_dtype(descr[0])
return numpy.dtype((dt, descr[1]))
titles = []
names = []
formats = []
offsets = []
offset = 0
for field in descr:
if len(field) == 2:
name, descr_str = field
dt = descr_to_dtype(descr_str)
else:
name, descr_str, shape = field
dt = numpy.dtype((descr_to_dtype(descr_str), shape))
# Ignore padding bytes, which will be void bytes with '' as name
# Once support for blank names is removed, only the check "if name == ''" is needed
is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
if not is_pad:
title, name = name if isinstance(name, tuple) else (None, name)
titles.append(title)
names.append(name)
formats.append(dt)
offsets.append(offset)
offset += dt.itemsize
return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
'offsets': offsets, 'itemsize': offset})
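A quick round-trip check of the two helpers above; the import path `numpy.lib.format` exposes both in recent NumPy releases, so treat that import as an assumption for older versions:

```python
# Hedged round-trip sketch for dtype_to_descr / descr_to_dtype.
import numpy as np
from numpy.lib.format import dtype_to_descr, descr_to_dtype

dt = np.dtype([("x", "<f4"), ("y", "<i8")])
descr = dtype_to_descr(dt)        # e.g. [('x', '<f4'), ('y', '<i8')]
assert descr_to_dtype(descr) == dt
print(descr)
```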
def header_data_from_array_1_0(array):
""" Get the dictionary of header metadata from a numpy.ndarray.
Parameters
----------
array : numpy.ndarray
Returns
-------
d : dict
This has the appropriate entries for writing its string representation
to the header of the file.
"""
d = {'shape': array.shape}
if array.flags.c_contiguous:
d['fortran_order'] = False
elif array.flags.f_contiguous:
d['fortran_order'] = True
else:
# Totally non-contiguous data. We will have to make it C-contiguous
# before writing. Note that we need to test for C_CONTIGUOUS first
# because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
d['fortran_order'] = False
d['descr'] = dtype_to_descr(array.dtype)
return d
def _wrap_header(header, version):
"""
Takes a stringified header, and attaches the prefix and padding to it
"""
import struct
assert version is not None
fmt, encoding = _header_size_info[version]
if not isinstance(header, bytes): # always true on python 3
header = header.encode(encoding)
hlen = len(header) + 1
padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
try:
header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
except struct.error:
msg = "Header length {} too big for version={}".format(hlen, version)
raise ValueError(msg) from None
# Pad the header with spaces and a final newline such that the magic
# string, the header-length short and the header are aligned on a
# ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
# aligned up to ARRAY_ALIGN on systems like Linux where mmap()
# offset must be page-aligned (i.e. the beginning of the file).
return header_prefix + header + b' '*padlen + b'\n'
def _wrap_header_guess_version(header):
"""
Like `_wrap_header`, but chooses an appropriate version given the contents
"""
try:
return _wrap_header(header, (1, 0))
except ValueError:
pass
try:
ret = _wrap_header(header, (2, 0))
except UnicodeEncodeError:
pass
else:
warnings.warn("Stored array in format 2.0. It can only be "
"read by NumPy >= 1.9", UserWarning, stacklevel=2)
return ret
header = _wrap_header(header, (3, 0))
warnings.warn("Stored array in format 3.0. It can only be "
"read by NumPy >= 1.17", UserWarning, stacklevel=2)
return header
def _write_array_header(fp, d, version=None):
""" Write the header for an array and returns the version used
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string representation
to the header of the file.
version: tuple or None
None means use oldest that works
explicit version will raise a ValueError if the format does not
allow saving this data. Default: None
"""
header = ["{"]
for key, value in sorted(d.items()):
# Need to use repr here, since we eval these when reading
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
if version is None:
header = _wrap_header_guess_version(header)
else:
header = _wrap_header(header, version)
fp.write(header)
def write_array_header_1_0(fp, d):
""" Write the header for an array using the 1.0 format.
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file.
"""
_write_array_header(fp, d, (1, 0))
def write_array_header_2_0(fp, d):
""" Write the header for an array using the 2.0 format.
The 2.0 format allows storing very large structured arrays.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file.
"""
_write_array_header(fp, d, (2, 0))
def read_array_header_1_0(fp):
"""
Read an array header from a filelike object using the 1.0 file format
version.
This will leave the file object located just after the header.
Parameters
----------
fp : filelike object
A file object or something with a `.read()` method like a file.
Returns
-------
shape : tuple of int
The shape of the array.
fortran_order : bool
The array data will be written out directly if it is either
C-contiguous or Fortran-contiguous. Otherwise, it will be made
contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
Raises
------
ValueError
If the data is invalid.
"""
return _read_array_header(fp, version=(1, 0))
def read_array_header_2_0(fp):
"""
Read an array header from a filelike object using the 2.0 file format
version.
This will leave the file object located just after the header.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
A file object or something with a `.read()` method like a file.
Returns
-------
shape : tuple of int
The shape of the array.
fortran_order : bool
The array data will be written out directly if it is either
C-contiguous or Fortran-contiguous. Otherwise, it will be made
contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
Raises
------
ValueError
If the data is invalid.
"""
return _read_array_header(fp, version=(2, 0))
def _filter_header(s):
"""Clean up 'L' in npz header ints.
Cleans up the 'L' in strings representing integers. Needed to allow npz
headers produced in Python2 to be read in Python3.
Parameters
----------
s : string
Npy file header.
Returns
-------
header : str
Cleaned up header.
"""
import tokenize
from io import StringIO
tokens = []
last_token_was_number = False
for token in tokenize.generate_tokens(StringIO(s).readline):
token_type = token[0]
token_string = token[1]
if (last_token_was_number and
token_type == tokenize.NAME and
token_string == "L"):
continue
else:
tokens.append(token)
last_token_was_number = (token_type == tokenize.NUMBER)
return tokenize.untokenize(tokens)
def _read_array_header(fp, version):
"""
see read_array_header_1_0
"""
# Read an unsigned, little-endian short int which has the length of the
# header.
import struct
hinfo = _header_size_info.get(version)
if hinfo is None:
raise ValueError("Invalid version {!r}".format(version))
hlength_type, encoding = hinfo
hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
header_length = struct.unpack(hlength_type, hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
header = header.decode(encoding)
# The header is a pretty-printed string representation of a literal
    # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
# Versions (2, 0) and (1, 0) could have been created by a Python 2
# implementation before header filtering was implemented.
if version <= (2, 0):
header = _filter_header(header)
try:
d = safe_eval(header)
except SyntaxError as e:
msg = "Cannot parse header: {!r}"
raise ValueError(msg.format(header)) from e
if not isinstance(d, dict):
msg = "Header is not a dictionary: {!r}"
raise ValueError(msg.format(d))
if EXPECTED_KEYS != d.keys():
keys = sorted(d.keys())
msg = "Header does not contain the correct keys: {!r}"
raise ValueError(msg.format(keys))
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
not all(isinstance(x, int) for x in d['shape'])):
msg = "shape is not valid: {!r}"
raise ValueError(msg.format(d['shape']))
if not isinstance(d['fortran_order'], bool):
msg = "fortran_order is not a valid bool: {!r}"
raise ValueError(msg.format(d['fortran_order']))
try:
dtype = descr_to_dtype(d['descr'])
except TypeError as e:
msg = "descr is not a valid dtype descriptor: {!r}"
raise ValueError(msg.format(d['descr'])) from e
return d['shape'], d['fortran_order'], dtype
def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
"""
Write an array to an NPY file, including a header.
If the array is neither C-contiguous nor Fortran-contiguous AND the
file_like object is not a real file object, this function will have to
copy data in memory.
Parameters
----------
fp : file_like object
An open, writable file object, or similar object with a
``.write()`` method.
array : ndarray
The array to write to disk.
version : (int, int) or None, optional
The version number of the format. None means use the oldest
supported version that is able to store the data. Default: None
allow_pickle : bool, optional
Whether to allow writing pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass to pickle.dump, excluding
'protocol'. These are only useful when pickling objects in object
arrays on Python 3 to Python 2 compatible format.
Raises
------
ValueError
If the array cannot be persisted. This includes the case of
allow_pickle=False and array being an object array.
Various other errors
If the array contains Python objects as part of its dtype, the
process of pickling them may raise various errors if the objects
are not picklable.
"""
_check_version(version)
_write_array_header(fp, header_data_from_array_1_0(array), version)
if array.itemsize == 0:
buffersize = 0
else:
# Set buffer size to 16 MiB to hide the Python loop overhead.
buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data
# directly. Instead, we will pickle it out
if not allow_pickle:
raise ValueError("Object arrays cannot be saved when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
pickle.dump(array, fp, protocol=3, **pickle_kwargs)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='F'):
fp.write(chunk.tobytes('C'))
else:
if isfileobj(fp):
array.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='C'):
fp.write(chunk.tobytes('C'))
def read_array(fp, allow_pickle=False, pickle_kwargs=None):
"""
Read an array from an NPY file.
Parameters
----------
fp : file_like object
If this is not a real file object, then this may take extra memory
and time.
allow_pickle : bool, optional
        Whether to allow reading pickled data. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
pickle_kwargs : dict
Additional keyword arguments to pass to pickle.load. These are only
useful when loading object arrays saved on Python 2 when using
Python 3.
Returns
-------
array : ndarray
The array from the data on disk.
Raises
------
ValueError
If the data is invalid, or allow_pickle=False and the file contains
an object array.
"""
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(fp, version)
if len(shape) == 0:
count = 1
else:
count = numpy.multiply.reduce(shape, dtype=numpy.int64)
# Now read the actual data.
if dtype.hasobject:
# The array contained Python objects. We need to unpickle the data.
if not allow_pickle:
raise ValueError("Object arrays cannot be loaded when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
try:
array = pickle.load(fp, **pickle_kwargs)
except UnicodeError as err:
# Friendlier error message
raise UnicodeError("Unpickling a python object failed: %r\n"
"You may need to pass the encoding= option "
"to numpy.load" % (err,)) from err
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
array = numpy.fromfile(fp, dtype=dtype, count=count)
else:
# This is not a real file. We have to read it the
# memory-intensive way.
# crc32 module fails on reads greater than 2 ** 32 bytes,
# breaking large reads from gzip streams. Chunk reads to
# BUFFER_SIZE bytes to avoid issue and reduce memory overhead
# of the read. In non-chunked case count < max_read_count, so
# only one read is performed.
# Use np.ndarray instead of np.empty since the latter does
# not correctly instantiate zero-width string dtypes; see
# https://github.com/numpy/numpy/pull/6430
array = numpy.ndarray(count, dtype=dtype)
if dtype.itemsize > 0:
# If dtype.itemsize == 0 then there's nothing more to read
max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
for i in range(0, count, max_read_count):
read_count = min(max_read_count, count - i)
read_size = int(read_count * dtype.itemsize)
data = _read_bytes(fp, read_size, "array data")
array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
count=read_count)
if fortran_order:
array.shape = shape[::-1]
array = array.transpose()
else:
array.shape = shape
return array
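def _example_write_read_roundtrip():
    # Illustrative sketch (not part of the original module): round-trip an
    # array through an in-memory buffer using write_array and read_array.
    import io
    import numpy
    buf = io.BytesIO()
    original = numpy.arange(12, dtype=numpy.float64).reshape(3, 4)
    write_array(buf, original, version=(1, 0))
    buf.seek(0)
    restored = read_array(buf)
    assert numpy.array_equal(original, restored)
    return restored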
def open_memmap(filename, mode='r+', dtype=None, shape=None,
fortran_order=False, version=None):
"""
Open a .npy file as a memory-mapped array.
This may be used to read an existing file or create a new one.
Parameters
----------
filename : str or path-like
The name of the file on disk. This may *not* be a file-like
object.
mode : str, optional
The mode in which to open the file; the default is 'r+'. In
addition to the standard file modes, 'c' is also accepted to mean
"copy on write." See `memmap` for the available mode strings.
dtype : data-type, optional
The data type of the array if we are creating a new file in "write"
mode, if not, `dtype` is ignored. The default value is None, which
results in a data-type of `float64`.
shape : tuple of int
The shape of the array if we are creating a new file in "write"
mode, in which case this parameter is required. Otherwise, this
parameter is ignored and is thus optional.
fortran_order : bool, optional
Whether the array should be Fortran-contiguous (True) or
C-contiguous (False, the default) if we are creating a new file in
"write" mode.
version : tuple of int (major, minor) or None
If the mode is a "write" mode, then this is the version of the file
format used to create the file. None means use the oldest
supported version that is able to store the data. Default: None
Returns
-------
marray : memmap
The memory-mapped array.
Raises
------
ValueError
If the data or the mode is invalid.
IOError
If the file is not found or cannot be opened correctly.
See Also
--------
numpy.memmap
"""
if isfileobj(filename):
raise ValueError("Filename must be a string or a path-like object."
" Memmap cannot use existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
# Check if we ought to create the file.
_check_version(version)
# Ensure that the given dtype is an authentic dtype object rather
# than just something that can be interpreted as a dtype object.
dtype = numpy.dtype(dtype)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
d = dict(
descr=dtype_to_descr(dtype),
fortran_order=fortran_order,
shape=shape,
)
# If we got here, then it should be safe to create the file.
with open(os_fspath(filename), mode+'b') as fp:
_write_array_header(fp, d, version)
offset = fp.tell()
else:
# Read the header of the file first.
with open(os_fspath(filename), 'rb') as fp:
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(fp, version)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
offset = fp.tell()
if fortran_order:
order = 'F'
else:
order = 'C'
# We need to change a write-only mode to a read-write mode since we've
# already written data to the file.
if mode == 'w+':
mode = 'r+'
marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
mode=mode, offset=offset)
return marray
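def _example_open_memmap(filename='example.npy'):
    # Illustrative sketch (not part of the original module): create a new
    # memory-mapped .npy file, fill it, then re-open it read-only. The file
    # name is a hypothetical placeholder.
    import numpy
    marray = open_memmap(filename, mode='w+', dtype=numpy.float32, shape=(4, 5))
    marray[:] = 1.0
    marray.flush()
    del marray
    return numpy.asarray(open_memmap(filename, mode='r')).sum()  # expected 20.0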
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Read from file-like object until size bytes are read.
    Raises ValueError if EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
| 34.21654
| 87
| 0.641437
| 4,430
| 31,445
| 4.488713
| 0.16298
| 0.008549
| 0.006336
| 0.006688
| 0.224793
| 0.186724
| 0.16173
| 0.151974
| 0.147448
| 0.135479
| 0
| 0.011151
| 0.272794
| 31,445
| 918
| 88
| 34.253813
| 0.858442
| 0.558181
| 0
| 0.215873
| 0
| 0.003175
| 0.118329
| 0
| 0
| 0
| 0
| 0
| 0.003175
| 1
| 0.063492
| false
| 0.012698
| 0.028571
| 0
| 0.165079
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac8631a21e5b850ca9b81ac1543a63108f70e71
| 6,090
|
py
|
Python
|
bin/focus_scan.py
|
desihub/desicmx
|
6f7c9a3cff25c970af57de20e3a12001382deb23
|
[
"BSD-3-Clause"
] | 3
|
2019-11-15T23:17:23.000Z
|
2019-11-27T17:19:33.000Z
|
bin/focus_scan.py
|
desihub/desicmx
|
6f7c9a3cff25c970af57de20e3a12001382deb23
|
[
"BSD-3-Clause"
] | 4
|
2019-12-12T03:37:32.000Z
|
2020-01-28T21:29:51.000Z
|
bin/focus_scan.py
|
desihub/desicmx
|
6f7c9a3cff25c970af57de20e3a12001382deb23
|
[
"BSD-3-Clause"
] | 2
|
2019-12-20T08:21:52.000Z
|
2020-06-30T15:21:53.000Z
|
#!/usr/bin/env python
import astropy.io.fits as fits
import numpy as np
import os
import matplotlib.pyplot as plt
import argparse
def _fname(expid, night,
basedir='/n/home/datasystems/users/ameisner/reduced/focus',
ccds=False):
fname = basedir + '/' + night + '/' + str(expid).zfill(8) + '/gfa-' + str(expid).zfill(8) + '_psfs.fits'
if ccds:
fname = fname.replace('_psfs.fits', '_ccds.fits')
return fname
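# Illustrative example (not in the original script): with the default basedir,
# _fname(45446, '20200131') builds
# '/n/home/datasystems/users/ameisner/reduced/focus/20200131/00045446/gfa-00045446_psfs.fits',
# and passing ccds=True swaps the suffix for '_ccds.fits'.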
def _actual_expid_list(expids, night, basedir='/n/home/datasystems/users/ameisner/reduced/focus'):
keep = []
for i, expid in enumerate(expids):
fname = _fname(expid, night, basedir=basedir, ccds=True)
if not os.path.exists(fname):
continue
tab = fits.getdata(fname)
# try to handle case where observer accidentally lists the 'setup focus scan'
# 1 second exposure as the start of the focus scan
if (i == 0) & (tab[0]['EXPTIME'] < 1.1):
print('SKIPPING DUMMY SETUP EXPOSURE')
continue
program = tab[0]['PROGRAM'].strip()
if program != 'focus scan':
break
keep.append(expid)
return keep
def focus_plots(night, expids,
basedir='/n/home/datasystems/users/ameisner/reduced/focus',
outdir='/n/home/desiobserver/focus_scan_pngs', no_popups=False):
expids = _actual_expid_list(expids, night, basedir=basedir)
if len(expids) == 0:
print('NO FOCUS SCAN EXPOSURES TO ANALYZE ??')
assert(False)
plt.figure(1, figsize=(12.0*(len(expids)/7.0), 9))
extnames = ['GUIDE0', 'GUIDE2', 'GUIDE3', 'GUIDE5', 'GUIDE7', 'GUIDE8']
focus_z = []
fwhm_pix = []
# PSF stamps plot
plt.subplots_adjust(hspace=0.01, wspace=0.01)
for i, expid in enumerate(expids):
fname = _fname(expid, night, basedir=basedir)
print(fname)
fname_ccds = _fname(expid, night, basedir=basedir, ccds=True)
if not os.path.exists(fname):
continue
ccds = fits.getdata(fname_ccds)
if np.sum(np.isfinite(ccds['PSF_FWHM_PIX'])) != 0:
fwhm_pix.append(np.median(ccds['PSF_FWHM_PIX'][np.isfinite(ccds['PSF_FWHM_PIX'])]))
focus_z.append(float(ccds[0]['FOCUS'].split(',')[2]))
hdul = fits.open(fname)
extnames_present = [hdu.header['EXTNAME'] for hdu in hdul]
for j, extname in enumerate(extnames):
if extname not in extnames_present:
continue
print(i, j)
plt.subplot(6, len(expids), len(expids)*j + i + 1)
plt.xticks([])
plt.yticks([])
im = fits.getdata(fname, extname=extname)
plt.imshow(im, interpolation='nearest', origin='lower', cmap='gray_r', vmin=0.01)
plt.text(5, 44, str(expid) + '; ' + extname, color='r', fontsize=9)
plt.text(10, 3.5, 'z = ' + str(int(float(ccds[0]['FOCUS'].split(',')[2]))), color='r')
if np.isfinite(ccds[j]['XCENTROID_PSF']) and np.isfinite(ccds[j]['YCENTROID_PSF']):
plt.scatter([ccds[j]['XCENTROID_PSF']], [ccds[j]['YCENTROID_PSF']], marker='.', c='r')
expid_min = int(np.min(expids))
print(focus_z)
print(fwhm_pix)
plt.savefig(os.path.join(outdir, 'stamps_focus_scan-' + str(expid_min).zfill(8)+'.png'), bbox_inches='tight')
#plt.cla()
plt.figure(200)
asec_per_pix = 0.205
focus_z = np.array(focus_z)
fwhm_asec = np.array(fwhm_pix)*asec_per_pix
plt.scatter(focus_z, fwhm_asec)
plt.xlabel('focus z (micron)')
plt.ylabel('FWHM (asec)')
coeff = np.polyfit(focus_z, fwhm_asec, 2)
xsamp = np.arange(np.min(focus_z), np.max(focus_z))
ysamp = coeff[0]*(np.power(xsamp, 2)) + coeff[1]*xsamp + coeff[2]
plt.title('focus scan starting with EXPID = ' + str(expid_min))
plt.plot(xsamp, ysamp)
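    # The fitted parabola a*z**2 + b*z + c has its minimum at z = -b / (2*a);
    # evaluating the fit there gives the best FWHM this scan could reach.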
zmin = -coeff[1]/(2*coeff[0])
min_fwhm_fit_asec = coeff[0]*(zmin**2) + coeff[1]*zmin + coeff[2]
yrange = [np.min(fwhm_asec), np.max(fwhm_asec)]
plt.text(focus_z[2], yrange[0] + 0.8*(yrange[1]-yrange[0]), 'best FWHM (meas) : ' + '{:.2f}'.format(np.min(fwhm_asec)))
plt.text(focus_z[2], yrange[0] + 0.7*(yrange[1]-yrange[0]), 'best FWHM (fit) : ' + '{:.2f}'.format(min_fwhm_fit_asec))
plt.text(focus_z[2], yrange[0] + 0.9*(yrange[1]-yrange[0]), 'best focus : ' + str(int(np.round(zmin))))
plt.savefig(os.path.join(outdir, 'fit_focus_scan-' + str(expid_min).zfill(8) + '.png'), bbox_inches='tight')
if not no_popups:
plt.show()
def _test():
night = '20200131'
expids = 45446 + np.arange(7)
focus_plots(night, expids, basedir='/project/projectdirs/desi/users/ameisner/GFA/run/psf_flux_weighted_centroid', outdir='.')
def _test_missing_cam():
night = '20200131'
expids = 45485 + np.arange(7)
focus_plots(night, expids, basedir='/project/projectdirs/desi/users/ameisner/GFA/run/psf_flux_weighted_centroid')
if __name__ == "__main__":
descr = 'GFA focus sequence plots/analysis'
parser = argparse.ArgumentParser(description=descr)
parser.add_argument('first_expid', type=int, nargs=1)
parser.add_argument('night', type=str, nargs=1)
parser.add_argument('--basedir', default='/n/home/datasystems/users/ameisner/reduced/focus',
type=str, help='base directory for GFA reductions')
parser.add_argument('--outdir', default='/n/home/desiobserver/focus_scan_pngs',
type=str, help='output directory for plot PNGs')
parser.add_argument('--no_popups', default=False, action='store_true',
help='write PNGs without popping up plot windows')
args = parser.parse_args()
expids = args.first_expid + np.arange(16, dtype=int)
print(expids)
print(args.night[0])
print(args.basedir)
outdir = args.outdir if os.path.exists(args.outdir) else '.'
focus_plots(args.night[0], expids, basedir=args.basedir, outdir=outdir, no_popups=args.no_popups)
| 35.614035
| 129
| 0.612315
| 836
| 6,090
| 4.327751
| 0.276316
| 0.021559
| 0.023494
| 0.024323
| 0.326423
| 0.30293
| 0.220011
| 0.208679
| 0.195412
| 0.158928
| 0
| 0.02575
| 0.228407
| 6,090
| 170
| 130
| 35.823529
| 0.744201
| 0.027915
| 0
| 0.104348
| 0
| 0
| 0.186475
| 0.069992
| 0
| 0
| 0
| 0
| 0.008696
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.104348
| 0.078261
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ac951af97aa3d1a0ef9e931276c0e45ff2d14cc
| 4,344
|
py
|
Python
|
pythia/utils/logger.py
|
abhiskk/pythia
|
c33fb45d74353c25b6269b44551bcafefecb5c7e
|
[
"BSD-3-Clause"
] | 2
|
2019-05-23T02:07:03.000Z
|
2019-06-08T18:56:05.000Z
|
pythia/utils/logger.py
|
abhiskk/pythia
|
c33fb45d74353c25b6269b44551bcafefecb5c7e
|
[
"BSD-3-Clause"
] | null | null | null |
pythia/utils/logger.py
|
abhiskk/pythia
|
c33fb45d74353c25b6269b44551bcafefecb5c7e
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
import base64
import logging
import os
import sys
from tensorboardX import SummaryWriter
from pythia.utils.distributed_utils import is_main_process
from pythia.utils.general import (ckpt_name_from_core_args,
foldername_from_config_override)
from pythia.utils.timer import Timer
class Logger:
def __init__(self, config):
self.logger = None
self.summary_writer = None
if not is_main_process():
return
self.timer = Timer()
self.config = config
self.save_dir = config.training_parameters.save_dir
self.log_folder = ckpt_name_from_core_args(config)
self.log_folder += foldername_from_config_override(config)
time_format = "%Y-%m-%dT%H:%M:%S"
self.log_filename = ckpt_name_from_core_args(config) + "_"
self.log_filename += self.timer.get_time_hhmmss(None, format=time_format)
self.log_filename += ".log"
self.log_folder = os.path.join(self.save_dir, self.log_folder, "logs")
arg_log_dir = self.config.get("log_dir", None)
if arg_log_dir:
self.log_folder = arg_log_dir
if not os.path.exists(self.log_folder):
os.makedirs(self.log_folder)
tensorboard_folder = os.path.join(self.log_folder, "tensorboard")
self.summary_writer = SummaryWriter(tensorboard_folder)
self.log_filename = os.path.join(self.log_folder, self.log_filename)
print("Logging to:", self.log_filename)
logging.captureWarnings(True)
self.logger = logging.getLogger(__name__)
self._file_only_logger = logging.getLogger(__name__)
warnings_logger = logging.getLogger("py.warnings")
# Set level
level = config["training_parameters"].get("logger_level", "info")
self.logger.setLevel(getattr(logging, level.upper()))
self._file_only_logger.setLevel(getattr(logging, level.upper()))
formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"
)
# Add handler to file
channel = logging.FileHandler(filename=self.log_filename, mode="a")
channel.setFormatter(formatter)
self.logger.addHandler(channel)
self._file_only_logger.addHandler(channel)
warnings_logger.addHandler(channel)
# Add handler to stdout
channel = logging.StreamHandler(sys.stdout)
channel.setFormatter(formatter)
self.logger.addHandler(channel)
warnings_logger.addHandler(channel)
should_not_log = self.config["training_parameters"]["should_not_log"]
self.should_log = not should_not_log
# Single log wrapper map
self._single_log_map = set()
def __del__(self):
if getattr(self, "summary_writer", None) is not None:
self.summary_writer.close()
def write(self, x, level="info", donot_print=False):
if self.logger is None:
return
# if it should not log then just print it
if self.should_log:
if hasattr(self.logger, level):
if donot_print:
getattr(self._file_only_logger, level)(str(x))
else:
getattr(self.logger, level)(str(x))
else:
self.logger.error("Unknown log level type: %s" % level)
else:
print(str(x) + "\n")
def single_write(self, x, level="info"):
if x + "_" + level in self._single_log_map:
return
else:
self.write(x, level)
def add_scalar(self, key, value, iteration):
if self.summary_writer is None:
return
self.summary_writer.add_scalar(key, value, iteration)
def add_scalars(self, scalar_dict, iteration):
if self.summary_writer is None:
return
for key, val in scalar_dict.items():
self.summary_writer.add_scalar(key, val, iteration)
def add_histogram_for_model(self, model, iteration):
if self.summary_writer is None:
return
for name, param in model.named_parameters():
np_param = param.clone().cpu().data.numpy()
self.summary_writer.add_histogram(name, np_param, iteration)
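# Illustrative usage sketch (not part of the original module); the config keys
# involved are the ones the constructor above reads:
#     logger = Logger(config)
#     logger.write("starting training")
#     logger.add_scalar("train/loss", 0.42, iteration)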
| 34.204724
| 81
| 0.63628
| 540
| 4,344
| 4.874074
| 0.246296
| 0.042553
| 0.06459
| 0.027356
| 0.282295
| 0.220745
| 0.147036
| 0.072948
| 0.032675
| 0
| 0
| 0.000625
| 0.263352
| 4,344
| 126
| 82
| 34.47619
| 0.821875
| 0.037523
| 0
| 0.206522
| 0
| 0
| 0.057738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076087
| false
| 0
| 0.086957
| 0
| 0.23913
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6acd5e71b7f337a2cb3ca947d7cf6d05f0a0b474
| 851
|
py
|
Python
|
setup.py
|
chearon/macpack
|
1cf6ce453dd33a811343e4bb6ee5575bc9fe919d
|
[
"MIT"
] | 24
|
2016-11-14T14:09:57.000Z
|
2022-01-26T02:22:45.000Z
|
setup.py
|
najiji/macpack
|
20b518e9bc0f4e58d47c5416a686a4b246a3764d
|
[
"MIT"
] | 5
|
2016-11-14T14:09:53.000Z
|
2019-04-18T15:49:14.000Z
|
setup.py
|
najiji/macpack
|
20b518e9bc0f4e58d47c5416a686a4b246a3764d
|
[
"MIT"
] | 3
|
2018-01-27T15:38:46.000Z
|
2019-04-09T16:21:23.000Z
|
import setuptools
import os
try:
import pypandoc
description = pypandoc.convert('README.md', 'rst') if os.path.exists('README.md') else ''
except ImportError:
description = ''
setuptools.setup(
name = 'macpack',
packages = setuptools.find_packages(),
version = '1.0.3',
description = 'Makes a macOS binary redistributable by searching the dependency tree and copying/patching non-system libraries.',
long_description = description,
author = 'Caleb Hearon',
author_email = '[email protected]',
url = 'https://github.com/chearon/macpack',
download_url = 'https://github.com/chearon/macpack/tarball/v1.0.3',
keywords = ['macos', 'bundle', 'package', 'redistribute', 'redistributable', 'install_name_tool', 'otool', 'mach'],
classifiers = [],
entry_points = {
'console_scripts': ['macpack=macpack.patcher:main'],
}
)
| 32.730769
| 131
| 0.706228
| 100
| 851
| 5.93
| 0.7
| 0.026981
| 0.047218
| 0.057336
| 0.104553
| 0.104553
| 0
| 0
| 0
| 0
| 0
| 0.008208
| 0.141011
| 851
| 25
| 132
| 34.04
| 0.80301
| 0
| 0
| 0
| 0
| 0
| 0.435958
| 0.032902
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.173913
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6acdf3a0dc36f1ce88eb6431d38ef46ea81f633b
| 1,371
|
py
|
Python
|
WEEK2/day5/scripts/06_NB_Challenges_Isolines.py
|
tizon9804/SS2017
|
7cb374ad21cdfeeef223ac4a65cbbf40dab22e06
|
[
"MIT"
] | null | null | null |
WEEK2/day5/scripts/06_NB_Challenges_Isolines.py
|
tizon9804/SS2017
|
7cb374ad21cdfeeef223ac4a65cbbf40dab22e06
|
[
"MIT"
] | null | null | null |
WEEK2/day5/scripts/06_NB_Challenges_Isolines.py
|
tizon9804/SS2017
|
7cb374ad21cdfeeef223ac4a65cbbf40dab22e06
|
[
"MIT"
] | null | null | null |
import vtk
# Read the file (to test that it was written correctly)
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName("../data/wind_image.vti")
reader.Update()
print(reader.GetOutput())
# Convert the image to a polydata
imageDataGeometryFilter = vtk.vtkImageDataGeometryFilter()
imageDataGeometryFilter.SetInputConnection(reader.GetOutputPort())
imageDataGeometryFilter.Update()
scalarRange = reader.GetOutput().GetPointData().GetScalars().GetRange(-1)
contoursFilter = vtk.vtkContourFilter()
contoursFilter.SetInputConnection(imageDataGeometryFilter.GetOutputPort())
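# GenerateValues(n, range) asks the contour filter for n isovalues spaced
# evenly across the scalar range, so the call below draws 60 isolines between
# the data minimum and maximum.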
contoursFilter.GenerateValues(60, scalarRange)
contoursMapper = vtk.vtkPolyDataMapper()
contoursMapper.SetInputConnection(contoursFilter.GetOutputPort())
contoursMapper.SetColorModeToMapScalars()
contoursMapper.ScalarVisibilityOn()
contoursMapper.SelectColorArray("JPEGImage")
contoursMapper.SetScalarRange(scalarRange)
contoursActor = vtk.vtkActor()
contoursActor.SetMapper(contoursMapper)
actor = vtk.vtkActor()
actor.SetMapper(contoursMapper)
# Setup rendering
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.SetBackground(1,1,1)
renderer.ResetCamera()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.Start()
| 30.466667
| 74
| 0.835157
| 117
| 1,371
| 9.777778
| 0.538462
| 0.026224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004677
| 0.064187
| 1,371
| 44
| 75
| 31.159091
| 0.886984
| 0.073669
| 0
| 0
| 0
| 0
| 0.024506
| 0.017391
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032258
| 0
| 0.032258
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ace6e18f6860e091c836de50634b3a607e70811
| 11,303
|
py
|
Python
|
mbbl_envs/mbbl/env/gym_env/invertedPendulum.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | 9
|
2022-01-16T11:27:00.000Z
|
2022-03-13T14:04:48.000Z
|
mbbl_envs/mbbl/env/gym_env/invertedPendulum.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | null | null | null |
mbbl_envs/mbbl/env/gym_env/invertedPendulum.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | null | null | null |
"""
# -----------------------------------------------------------------------------
# @brief:
# Tingwu: reset the reward function so that it's more similar to the one
# defined in GYM
# -----------------------------------------------------------------------------
"""
import numpy as np
from mbbl.config import init_path
from mbbl.env import base_env_wrapper as bew
from mbbl.env import env_register
from mbbl.env import env_util
from mbbl.util.common import logger
class env(bew.base_env):
# acrobot has applied sin/cos obs
PENDULUM = ['gym_invertedPendulum']
def __init__(self, env_name, rand_seed, misc_info):
super(env, self).__init__(env_name, rand_seed, misc_info)
self._base_path = init_path.get_abs_base_dir()
self._len_qpos, self._len_qvel = \
env_util.get_gym_q_info(self._env, self._current_version)
# return the reset as the gym?
if 'reset_type' in misc_info and misc_info['reset_type'] == 'gym':
self._reset_return_obs_only = True
self.observation_space, self.action_space = \
self._env.observation_space, self._env.action_space
# it's possible some environments have different obs
self.observation_space = \
env_util.box(self._env_info['ob_size'], -1, 1)
else:
self._reset_return_obs_only = False
def step(self, action):
_, _, _, info = self._env.step(action)
ob = self._get_observation()
# get the reward
reward = self.reward(
{'end_state': ob, 'start_state': self._old_ob, 'action': action}
)
# from mbbl.util.common.fpdb import fpdb; fpdb().set_trace()
# get the end signal
self._current_step += 1
info['current_step'] = self._current_step
if self._current_step > self._env_info['max_length']:
done = True
else:
done = False # will raise warnings -> set logger flag to ignore
self._old_ob = np.array(ob)
return ob, reward, done, info
def reset(self, control_info={}):
self._current_step = 0
self._env.reset()
# the following is a hack, there is some precision issue in mujoco_py
self._old_ob = self._get_observation()
self._env.reset()
self.set_state({'start_state': self._old_ob.copy()})
self._old_ob = self._get_observation()
if self._reset_return_obs_only:
return self._old_ob.copy()
else:
return self._old_ob.copy(), 0.0, False, {}
def _get_observation(self):
if self._current_version in ['0.7.4', '0.9.4']:
qpos = self._env.env.data.qpos
qvel = self._env.env.data.qvel
else:
qpos = self._env.env.sim.data.qpos
qvel = self._env.env.sim.data.qvel
"""
if self._env_name == 'gym_doublePendulum':
if self._current_version in ['0.7.4', '0.9.4']:
site_xpos = self._env.env.data.site_xpos[:, [0, 2]]
else:
site_xpos = self._env.env.sim.data.site_xpos[:, [0, 2]]
site_xpos = np.transpose(site_xpos)
return np.concatenate([qpos, qvel, site_xpos]).ravel()
else:
"""
assert self._env_name == 'gym_invertedPendulum'
return np.concatenate([qpos, qvel]).ravel()
def _build_env(self):
import gym
self._current_version = gym.__version__
if self._current_version in ['0.7.4', '0.9.4']:
_env_name = {
'gym_invertedPendulum': 'InvertedPendulum-v1',
}
elif self._current_version == NotImplementedError:
# TODO: other gym versions here
_env_name = {
'gym_invertedPendulum': 'InvertedPendulum-v2',
}
else:
raise ValueError("Invalid gym-{}".format(self._current_version))
# make the environments
self._env_info = env_register.get_env_info(self._env_name)
self._env_name = self._env_name.split('-')[0]
self._env = gym.make(_env_name[self._env_name])
def _set_groundtruth_api(self):
""" @brief:
In this function, we could provide the ground-truth dynamics
and rewards APIs for the agent to call.
For the new environments, if we don't set their ground-truth
apis, then we cannot test the algorithm using ground-truth
dynamics or reward
"""
self._set_reward_api()
self._set_dynamics_api()
def _set_dynamics_api(self):
def set_state(data_dict):
qpos = np.zeros([self._len_qpos])
qvel = np.zeros([self._len_qvel])
qpos[:] = data_dict['start_state'][:self._len_qpos]
qvel[:] = data_dict['start_state'][
self._len_qpos: self._len_qpos + self._len_qvel
]
# reset the state
if self._current_version in ['0.7.4', '0.9.4']:
self._env.env.data.qpos = qpos.reshape([-1, 1])
self._env.env.data.qvel = qvel.reshape([-1, 1])
else:
self._env.env.sim.data.qpos = qpos.reshape([-1])
                self._env.env.sim.data.qvel = qvel.reshape([-1])
self._env.env.model._compute_subtree() # pylint: disable=W0212
self._env.env.model.forward()
self._old_ob = self._get_observation()
self.set_state = set_state
def fdynamics(data_dict):
            # make sure reset is called before using self.fdynamics()
self.set_state(data_dict)
return self.step(data_dict['action'])[0]
self.fdynamics = fdynamics
def _set_reward_api(self):
"""
def _step(self, a):
reward = 1.0
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
notdone = np.isfinite(ob).all() and (np.abs(ob[1]) <= .2)
done = not notdone
self.do_simulation(action, self.frame_skip)
ob = self._get_obs()
x, _, y = self.model.data.site_xpos[0]
dist_penalty = 0.01 * x ** 2 + (y - 2) ** 2
v1, v2 = self.model.data.qvel[1:3]
vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2
alive_bonus = 10
r = (alive_bonus - dist_penalty - vel_penalty)[0]
done = bool(y <= 1)
return ob, r, done, {}
reward:
@xpos_penalty: x ** 2
@ypos_penalty: (y - 2) ** 2
pendulum: (slide, hinge)
qpos: 2 (0, 1)
qvel: 2 (2, 3)
double_pendulum: (slide, hinge, hinge)
qpos: 3 (0, 1, 2)
qvel: 3 (3, 4, 5)
site_pose: 2 (6, 7)
"""
# step 1, set the zero-order reward function
assert self._env_name in self.PENDULUM
"""
xpos_ob_pos = \
{'gym_pendulum': 0, 'gym_doublePendulum': 6}[self._env_name]
ypos_ob_pos = \
{'gym_pendulum': 1, 'gym_doublePendulum': 7}[self._env_name]
ypos_target = \
{'gym_pendulum': 0.0, 'gym_doublePendulum': 2}[self._env_name]
xpos_coeff = \
{'gym_pendulum': 0.0, 'gym_doublePendulum': 0.01}[self._env_name]
"""
xpos_ob_pos = 0
ypos_ob_pos = 1
ypos_target = 0.0
xpos_coeff = 0.0
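        # The reward below is r = -xpos_coeff * x**2 - (y - ypos_target)**2,
        # so dr/dx = -2 * xpos_coeff * x and dr/dy = -2 * (y - ypos_target);
        # reward_derivative further down returns these first and second
        # derivatives in the shapes the planner expects.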
def reward(data_dict):
# xpos penalty
xpos = data_dict['start_state'][xpos_ob_pos]
xpos_reward = -(xpos ** 2) * xpos_coeff
# ypos penalty
ypos = data_dict['start_state'][ypos_ob_pos]
ypos_reward = -(ypos - ypos_target) ** 2
return xpos_reward + ypos_reward
self.reward = reward
def reward_derivative(data_dict, target):
num_data = len(data_dict['start_state'])
if target == 'state':
derivative_data = np.zeros(
[num_data, self._env_info['ob_size']], dtype=np.float
)
# the xpos reward part
derivative_data[:, xpos_ob_pos] += - 2.0 * xpos_coeff * \
(data_dict['start_state'][:, xpos_ob_pos])
# the ypos reward part
derivative_data[:, ypos_ob_pos] += - 2.0 * \
(data_dict['start_state'][:, ypos_ob_pos] - ypos_target)
elif target == 'action':
derivative_data = np.zeros(
[num_data, self._env_info['action_size']], dtype=np.float
)
elif target == 'state-state':
derivative_data = np.zeros(
[num_data,
self._env_info['ob_size'], self._env_info['ob_size']],
dtype=np.float
)
# the xpos reward
derivative_data[:, xpos_ob_pos, xpos_ob_pos] += \
- 2.0 * xpos_coeff
# the ypos reward
derivative_data[:, ypos_ob_pos, ypos_ob_pos] += \
- 2.0
elif target == 'action-state':
derivative_data = np.zeros(
[num_data, self._env_info['action_size'],
self._env_info['ob_size']],
dtype=np.float
)
elif target == 'state-action':
derivative_data = np.zeros(
[num_data, self._env_info['ob_size'],
self._env_info['action_size']],
dtype=np.float
)
elif target == 'action-action':
derivative_data = np.zeros(
[num_data, self._env_info['action_size'],
self._env_info['action_size']],
dtype=np.float
)
else:
assert False, logger.error('Invalid target {}'.format(target))
return derivative_data
self.reward_derivative = reward_derivative
def render(self, *args, **kwargs):
return
if __name__ == '__main__':
# test_env_name = ['gym_doublePendulum']
test_env_name = ['gym_invertedPendulum']
for env_name in test_env_name:
test_env = env(env_name, 1234, {})
api_env = env(env_name, 1234, {})
api_env.reset()
ob, reward, _, _ = test_env.reset()
for _ in range(100):
action = np.random.uniform(-1, 1, test_env._env.action_space.shape)
new_ob, reward, _, _ = test_env.step(action)
# test the reward api
reward_from_api = \
api_env.reward({'start_state': ob, 'action': action})
reward_error = np.sum(np.abs(reward_from_api - reward))
# test the dynamics api
newob_from_api = \
api_env.fdynamics({'start_state': ob, 'action': action})
ob_error = np.sum(np.abs(newob_from_api - new_ob))
ob = new_ob
print('reward error: {}, dynamics error: {}'.format(
reward_error, ob_error)
)
| 35.656151
| 79
| 0.529594
| 1,356
| 11,303
| 4.106195
| 0.168879
| 0.055316
| 0.025682
| 0.022629
| 0.363865
| 0.244971
| 0.181394
| 0.122665
| 0.11153
| 0.104346
| 0
| 0.018529
| 0.345837
| 11,303
| 316
| 80
| 35.768987
| 0.734514
| 0.178714
| 0
| 0.175141
| 0
| 0
| 0.074819
| 0
| 0
| 0
| 0
| 0.003165
| 0.016949
| 1
| 0.073446
| false
| 0
| 0.039548
| 0.00565
| 0.169492
| 0.00565
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad0bc72be93fcbf7c2b0d3f4185b26d3bfb3b1c
| 1,426
|
py
|
Python
|
web/pingpongpiweb.py
|
andrewdyersmith/pingpongpi
|
63e969468da24b2d00e86033dfcb22de75f264bc
|
[
"MIT"
] | null | null | null |
web/pingpongpiweb.py
|
andrewdyersmith/pingpongpi
|
63e969468da24b2d00e86033dfcb22de75f264bc
|
[
"MIT"
] | null | null | null |
web/pingpongpiweb.py
|
andrewdyersmith/pingpongpi
|
63e969468da24b2d00e86033dfcb22de75f264bc
|
[
"MIT"
] | null | null | null |
# Ping Pong Pi web UI running on flask.
# Uses zmq to speak to daemon controlling screen.
from flask import Flask, render_template, appcontext_tearing_down, request
from multiprocessing import Process, Queue
from multiprocessing.connection import Client
import atexit
import time
import zmq
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
MODE="mode"
@app.route('/mode/<name>', methods=['POST'])
def mode(name):
text = request.args.get("val", default="", type=str)
message_queue.put([MODE,name,text])
return "\"OK\""
message_queue = Queue()
message_process = None
def message_loop(message_queue):
print("Starting message loop")
context = zmq.Context()
while True:
try:
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
print("Connected to daemon")
while True:
msg = message_queue.get()
print("Sending ", msg)
socket.send_json(msg)
socket.recv()
except Exception as ex:
print(ex)
time.sleep(5)
def stop_message_loop():
print("Terminating")
if message_process:
message_process.terminate()
atexit.register(stop_message_loop)
@app.before_first_request
def setup_ipc():
global message_process
message_process = Process(target=message_loop, args=(message_queue,))
message_process.start()
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| 22.634921
| 74
| 0.704067
| 191
| 1,426
| 5.062827
| 0.486911
| 0.086867
| 0.024819
| 0.057911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007608
| 0.170407
| 1,426
| 62
| 75
| 23
| 0.809806
| 0.059607
| 0
| 0.043478
| 0
| 0
| 0.096413
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.130435
| 0.021739
| 0.282609
| 0.108696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad190f41233de2c7f9d3aa69edc83f906187598
| 5,171
|
py
|
Python
|
watcher/tests/decision_engine/strategy/strategies/test_base.py
|
ajaytikoo/watcher
|
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
|
[
"Apache-2.0"
] | 64
|
2015-10-18T02:57:24.000Z
|
2022-01-13T11:27:51.000Z
|
watcher/tests/decision_engine/strategy/strategies/test_base.py
|
ajaytikoo/watcher
|
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
|
[
"Apache-2.0"
] | null | null | null |
watcher/tests/decision_engine/strategy/strategies/test_base.py
|
ajaytikoo/watcher
|
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
|
[
"Apache-2.0"
] | 35
|
2015-12-25T13:53:21.000Z
|
2021-07-19T15:50:16.000Z
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from watcher.common import exception
from watcher.decision_engine.datasources import manager
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
class TestBaseStrategy(base.TestCase):
def setUp(self):
super(TestBaseStrategy, self).setUp()
# fake cluster
self.fake_c_cluster = faker_cluster_state.FakerModelCollector()
p_c_model = mock.patch.object(
strategies.BaseStrategy, "compute_model",
new_callable=mock.PropertyMock)
self.m_c_model = p_c_model.start()
self.addCleanup(p_c_model.stop)
p_audit_scope = mock.patch.object(
strategies.BaseStrategy, "audit_scope",
new_callable=mock.PropertyMock)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_c_model.return_value = model_root.ModelRoot()
self.strategy = strategies.DummyStrategy(config=mock.Mock())
class TestBaseStrategyDatasource(TestBaseStrategy):
def setUp(self):
super(TestBaseStrategyDatasource, self).setUp()
self.strategy = strategies.DummyStrategy(
config=mock.Mock(datasources=None))
@mock.patch.object(strategies.BaseStrategy, 'osc', None)
@mock.patch.object(manager, 'DataSourceManager')
@mock.patch.object(strategies.base, 'CONF')
def test_global_preference(self, m_conf, m_manager):
"""Test if the global preference is used"""
m_conf.watcher_datasources.datasources = \
['gnocchi', 'monasca', 'ceilometer']
# Make sure we access the property and not the underlying function.
m_manager.return_value.get_backend.return_value = \
mock.NonCallableMock()
# Access the property so that the configuration is read in order to
# get the correct datasource
self.strategy.datasource_backend
m_manager.assert_called_once_with(
config=m_conf.watcher_datasources, osc=None)
@mock.patch.object(strategies.BaseStrategy, 'osc', None)
@mock.patch.object(manager, 'DataSourceManager')
@mock.patch.object(strategies.base, 'CONF')
def test_global_preference_reverse(self, m_conf, m_manager):
"""Test if the global preference is used with another order"""
m_conf.watcher_datasources.datasources = \
['ceilometer', 'monasca', 'gnocchi']
# Make sure we access the property and not the underlying function.
m_manager.return_value.get_backend.return_value = \
mock.NonCallableMock()
# Access the property so that the configuration is read in order to
# get the correct datasource
self.strategy.datasource_backend
m_manager.assert_called_once_with(
config=m_conf.watcher_datasources, osc=None)
@mock.patch.object(strategies.BaseStrategy, 'osc', None)
@mock.patch.object(manager, 'DataSourceManager')
@mock.patch.object(strategies.base, 'CONF')
def test_strategy_preference_override(self, m_conf, m_manager):
"""Test if the global preference can be overridden"""
datasources = mock.Mock(datasources=['ceilometer'])
self.strategy = strategies.DummyStrategy(
config=datasources)
m_conf.watcher_datasources.datasources = \
['ceilometer', 'monasca', 'gnocchi']
# Access the property so that the configuration is read in order to
# get the correct datasource
self.strategy.datasource_backend
m_manager.assert_called_once_with(
config=datasources, osc=None)
class TestBaseStrategyException(TestBaseStrategy):
def setUp(self):
super(TestBaseStrategyException, self).setUp()
def test_exception_model(self):
self.m_c_model.return_value = None
self.assertRaises(
exception.ClusterStateNotDefined, self.strategy.execute)
def test_exception_stale_cdm(self):
self.fake_c_cluster.set_cluster_data_model_as_stale()
self.m_c_model.return_value = self.fake_c_cluster.cluster_data_model
self.assertRaises(
# TODO(Dantali0n) This should return ClusterStale,
# improve set_cluster_data_model_as_stale().
exception.ClusterStateNotDefined,
self.strategy.execute)
| 36.673759
| 76
| 0.705473
| 623
| 5,171
| 5.675762
| 0.272873
| 0.027998
| 0.046663
| 0.056561
| 0.554299
| 0.465215
| 0.413744
| 0.386029
| 0.353224
| 0.353224
| 0
| 0.002446
| 0.209244
| 5,171
| 140
| 77
| 36.935714
| 0.862314
| 0.245794
| 0
| 0.460526
| 0
| 0
| 0.046126
| 0
| 0
| 0
| 0
| 0.007143
| 0.065789
| 1
| 0.105263
| false
| 0
| 0.092105
| 0
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad243bce2bf880a6b70228da5819c87e92c557b
| 776
|
py
|
Python
|
test/test_sampler.py
|
pfnet-research/autogbt-alt
|
57f7ae1bce2923d11f73c3631e34be49c7dd25da
|
[
"MIT"
] | 83
|
2019-04-01T05:45:37.000Z
|
2021-04-13T02:33:04.000Z
|
test/test_sampler.py
|
pfnet-research/autogbt-alt
|
57f7ae1bce2923d11f73c3631e34be49c7dd25da
|
[
"MIT"
] | null | null | null |
test/test_sampler.py
|
pfnet-research/autogbt-alt
|
57f7ae1bce2923d11f73c3631e34be49c7dd25da
|
[
"MIT"
] | 10
|
2019-04-15T03:15:42.000Z
|
2020-03-30T11:52:12.000Z
|
import numpy as np
import pandas as pd
from autogbt.sampler import MajorityUnderSampler
def _test_sample(y):
sampler = MajorityUnderSampler()
idx = sampler.sample(y, 40000, 3.0)
assert len(idx) == 40000
assert y[idx].sum() == 10000
def test_sample_with_series():
y = pd.Series(np.concatenate([np.ones((10000)), np.zeros((100000))]))
y = y.sample(frac=1.0)
_test_sample(y)
def test_sample_with_ndarray():
y = np.concatenate([np.ones((10000)), np.zeros((100000))])
_test_sample(y)
def test_sample_for_regression():
y = np.concatenate([
2*np.ones((10000)),
1*np.ones((10000)),
0*np.ones((10000)),
])
sampler = MajorityUnderSampler()
idx = sampler.sample(y, 0.1, 3.0)
assert len(idx) == 3000
| 23.515152
| 73
| 0.640464
| 111
| 776
| 4.342342
| 0.315315
| 0.124481
| 0.114108
| 0.153527
| 0.493776
| 0.435685
| 0.153527
| 0.153527
| 0
| 0
| 0
| 0.108065
| 0.201031
| 776
| 32
| 74
| 24.25
| 0.669355
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.166667
| false
| 0
| 0.125
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad3e60ef95d7e5c040fd394c92201b95875defd
| 1,155
|
py
|
Python
|
main.py
|
thewhiteninja/twitch-recorder
|
815b571e22917daa906d054a8ab2fe794e99bb8a
|
[
"MIT"
] | null | null | null |
main.py
|
thewhiteninja/twitch-recorder
|
815b571e22917daa906d054a8ab2fe794e99bb8a
|
[
"MIT"
] | null | null | null |
main.py
|
thewhiteninja/twitch-recorder
|
815b571e22917daa906d054a8ab2fe794e99bb8a
|
[
"MIT"
] | null | null | null |
import glob
import os
import sys
import utils
from recorder import StreamRec
OUTDIR = ""
def parse_args(a):
global OUTDIR
i = 1
while i < len(a):
if a[i] in ["-h", "--help", "/?"]:
usage()
if a[i] in ["-d", "--dir"]:
OUTDIR = a[i + 1]
i += 1
i += 1
def usage():
print("Record your favorite Twitch streams!")
print("Check an example of .stream file in data/ to see how to add a stream to record")
print()
print("Usage: %s [Options]" % (os.path.basename(sys.argv[0])))
print()
print("Options :")
print(" -d, --dir : Output directory")
print(" -h, --help : Help")
sys.exit(1)
def load_streams():
all_inst = []
stream_files = glob.glob('data/**/*.stream', recursive=True)
for stream_file in stream_files:
inst = StreamRec(stream_file, OUTDIR)
all_inst.append(inst)
for inst in all_inst:
inst.start()
for inst in all_inst:
inst.join()
def main():
utils.welcome()
parse_args(sys.argv)
utils.make_directory(OUTDIR)
load_streams()
if __name__ == '__main__':
main()
| 20.625
| 91
| 0.565368
| 158
| 1,155
| 4
| 0.411392
| 0.012658
| 0.012658
| 0.018987
| 0.063291
| 0.063291
| 0
| 0
| 0
| 0
| 0
| 0.007282
| 0.28658
| 1,155
| 55
| 92
| 21
| 0.759709
| 0
| 0
| 0.139535
| 0
| 0
| 0.204329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.116279
| 0
| 0.209302
| 0.186047
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad40da9c9320f7c8df4a83d064f6172f24c03ec
| 2,268
|
py
|
Python
|
karbor-1.3.0/karbor/policies/protectables.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
karbor-1.3.0/karbor/policies/protectables.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
karbor-1.3.0/karbor/policies/protectables.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from karbor.policies import base
GET_POLICY = 'protectable:get'
GET_ALL_POLICY = 'protectable:get_all'
INSTANCES_GET_POLICY = 'protectable:instance_get'
INSTANCES_GET_ALL_POLICY = 'protectable:instance_get_all'
protectables_policies = [
policy.DocumentedRuleDefault(
name=GET_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description='Show a protectable type.',
operations=[
{
'method': 'GET',
'path': '/protectables/{protectable_type}'
}
]),
policy.DocumentedRuleDefault(
name=GET_ALL_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description='List protectable types.',
operations=[
{
'method': 'GET',
'path': '/protectables'
}
]),
policy.DocumentedRuleDefault(
name=INSTANCES_GET_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description='Show a protectable instance.',
operations=[
{
'method': 'GET',
'path': '/protectables/{protectable_type}/'
'instances/{resource_id}'
}
]),
policy.DocumentedRuleDefault(
name=INSTANCES_GET_ALL_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description='List protectable instances.',
operations=[
{
'method': 'GET',
'path': '/protectables/{protectable_type}/instances'
}
]),
]
def list_rules():
return protectables_policies
| 31.068493
| 78
| 0.619489
| 238
| 2,268
| 5.718487
| 0.42437
| 0.044085
| 0.035268
| 0.052902
| 0.398971
| 0.314475
| 0.314475
| 0.277737
| 0.191036
| 0.191036
| 0
| 0.004963
| 0.289242
| 2,268
| 72
| 79
| 31.5
| 0.83933
| 0.272928
| 0
| 0.392157
| 0
| 0
| 0.234394
| 0.111383
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.039216
| 0.019608
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad467fa9905c0ca84ad3c1dc298047956f35818
| 252
|
py
|
Python
|
notebooks/2018.11.09 Meeting.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/2018.11.09 Meeting.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/2018.11.09 Meeting.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
#%%
from uarray.core import *
#%%
s = Scalar(Int(10))
#%%
@operation
def Always(a: T) -> CCallableUnary[T, CContent]:
...
#%%
register(Call(Always(w("a")), w("idx")), lambda a, idx: a)
#%%
a_ten = Always(s)
#%%
s = Sequence(Int(10), a_ten)
| 10.956522
| 58
| 0.559524
| 37
| 252
| 3.756757
| 0.594595
| 0.071942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019417
| 0.18254
| 252
| 22
| 59
| 11.454545
| 0.65534
| 0.047619
| 0
| 0
| 0
| 0
| 0.017094
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad4fd638f3c8440ee1f4046774d447aac8466fb
| 2,540
|
py
|
Python
|
var/spack/repos/builtin/packages/py-black/package.py
|
dwstreetNNL/spack
|
8f929707147c49606d00386a10161529dad4ec56
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-black/package.py
|
dwstreetNNL/spack
|
8f929707147c49606d00386a10161529dad4ec56
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-black/package.py
|
dwstreetNNL/spack
|
8f929707147c49606d00386a10161529dad4ec56
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBlack(PythonPackage):
"""Black is the uncompromising Python code formatter. By using it, you agree to
cede control over minutiae of hand-formatting. In return, Black gives you
speed, determinism, and freedom from pycodestyle nagging about formatting.
"""
homepage = "https://github.com/psf/black"
url = "https://pypi.io/packages/source/b/black/black-20.8b1.tar.gz"
version('20.8b1', sha256='1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea')
version('19.3b0', sha256='68950ffd4d9169716bcb8719a56c07a2f4485354fec061cdd5910aa07369731c')
version('18.9b0', sha256='e030a9a28f542debc08acceb273f228ac422798e5215ba2a791a6ddeaaca22a5')
variant('d', default=False, description='enable blackd HTTP server')
depends_on('[email protected]:')
# Needs setuptools at runtime so that `import pkg_resources` succeeds
# See #8843 and #8689 for examples of setuptools added as a runtime dep
depends_on('py-setuptools', type=('build', 'run'))
# Translated from black's setup.py:
# https://github.com/ambv/black/blob/master/setup.py
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@20.8b1:', type=('build', 'run'))
depends_on('[email protected]:', when='@:20.8b0', type=('build', 'run'))
depends_on('py-appdirs', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@20.8b1:', type=('build', 'run'))
depends_on('[email protected]:', when='@19.10b0:', type=('build', 'run'))
depends_on('[email protected]:', when='@20.8b0:', type=('build', 'run'))
depends_on('[email protected]:0.999', when='@19.10b0:', type=('build', 'run'))
depends_on('[email protected]:', when='@20.8b0:^python@:3.6', type=('build', 'run'))
depends_on('[email protected]:', when='@20.8b0:', type=('build', 'run'))
depends_on('[email protected]:', when='@20.8b0:', type=('build', 'run'))
depends_on('[email protected]:', when='+d', type=('build', 'run'))
depends_on('py-aiohttp-cors', when='+d', type=('build', 'run'))
@property
def import_modules(self):
modules = ['blib2to3', 'blib2to3.pgen2', 'black']
if '+d' in self.spec:
modules.append('blackd')
return modules
| 47.924528
| 96
| 0.663386
| 354
| 2,540
| 4.70904
| 0.446328
| 0.086383
| 0.09898
| 0.14817
| 0.25135
| 0.238152
| 0.210558
| 0.188362
| 0.154769
| 0
| 0
| 0.115509
| 0.144488
| 2,540
| 52
| 97
| 48.846154
| 0.651634
| 0.250394
| 0
| 0
| 0
| 0.033333
| 0.446695
| 0.141258
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.066667
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad54c23aea43b34f6b32a39f371e9919e5e2f64
| 3,772
|
py
|
Python
|
store/adminshop/templatetags/admin_extras.py
|
vallemrv/my_store_test
|
2da624fd02c5f1784464f15b751b488f3dd2bae6
|
[
"Apache-2.0"
] | null | null | null |
store/adminshop/templatetags/admin_extras.py
|
vallemrv/my_store_test
|
2da624fd02c5f1784464f15b751b488f3dd2bae6
|
[
"Apache-2.0"
] | null | null | null |
store/adminshop/templatetags/admin_extras.py
|
vallemrv/my_store_test
|
2da624fd02c5f1784464f15b751b488f3dd2bae6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 27-Aug-2017
# @Email: [email protected]
# @Filename: admin_extras.py
# @Last modified by: valle
# @Last modified time: 02-Feb-2018
# @License: Apache license version 2.0
from django import template
from django.db.models import Q
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from adminshop.models import Testeo, Compras, Presupuesto
import json
import sys
register = template.Library()
@register.filter(name='get_nombre_cliente')
def get_nombre_cliente(f):
return f.get_nombre_cliente()
@register.filter(name='enviado')
def enviado(f):
return "No" if not f.enviado else "Si"
@register.filter(name='get_user')
def get_user(f):
return f.get_user()
@register.filter(name='get_ns_imei')
def get_ns_imei(f):
return f.get_ns_imei()
@register.filter(name='get_producto_pk')
def get_producto_pk(f):
return f.get_producto_pk()
@register.filter(name='addcss')
def addcss(field, css):
return field.as_widget(attrs={"class":css})
@register.filter(name='reparacion')
def reparacion(p):
try:
pres = Presupuesto.objects.filter(producto__pk=p.id)[0]
return pres.notas_cliente
except IndexError:  # no Presupuesto exists for this product
return ""
@register.filter(name='num_pres')
def num_pres(p):
try:
pres = Presupuesto.objects.filter(producto__pk=p.id)[0]
return pres.pk
except IndexError:  # no Presupuesto exists for this product
return -1
@register.filter(name='precio_venta')
def precio_venta(p):
precio = 0 if p.precio_venta is None else p.precio_venta
return "{0:.2f} €".format(precio)
@register.filter(name='precio_usado')
def precio_usado(p):
return "{0:.2f} €".format(p.modelo.precio_usado * p.tipo.incremento)
@register.filter(name='document_show')
def document_show(p):
# Show the document link only for products in these states
return p.estado in ["ST", "VD", "OL", "VT"]
@register.filter(name='document_href')
def document_href(p):
if p.estado in ["ST", "VT", "OL"]:
return reverse("get_document_by_id", args=[p.pk])
elif p.estado in ["RP", "OK", "PD"]:
return reverse("get_presupuesto_pdf", args=[p.pk])
elif p.estado == "VD":
return reverse("get_all_document", args=[p.pk])
else:
return "#"
@register.filter(name='have_sign')
def have_sign(p):
compras = Compras.objects.filter(producto__id=p.pk)
compra = Compras()
if len(compras) > 0:
compra = compras[0]
return p.estado in ["ST", "VD", "OL", "VT"] and compra.firma == ""
@register.filter(name='editable')
def editable(p):
return p.estado in ["ST", "OL", "VT"]
@register.simple_tag(name='get_estado_value')
def get_estado_value(test_id, p_id, estado):
testeos = Testeo.objects.filter(Q(descripcion__pk=test_id) &
Q(producto__pk=p_id))
send = ""
if len(testeos) > 0 and testeos[0].estado == estado:
send = "selected"
return send
@register.filter(name='addattrs')
def addattrs(field, args):
attr = {}
try:
args_parse = args.replace("'", '"')
attr = json.loads(args_parse)
except Exception as error:
print(error)
return field.as_widget(attrs=attr)
@register.filter('klass')
def klass(ob):
return ob.field.widget.__class__.__name__
@register.filter('display')
def display(form, value):
return dict(form.field.choices)[value]
@register.filter('modelo')
def modelo(p):
if p.modelo is not None:
return str(p.modelo)
else:
return p.detalle
@register.filter('marca')
def marca(p):
if p.modelo is not None:
return str(p.modelo.marca)
else:
return ""
| 24.980132
| 72
| 0.656946
| 535
| 3,772
| 4.48972
| 0.271028
| 0.110741
| 0.112406
| 0.034971
| 0.202748
| 0.162365
| 0.147377
| 0.124896
| 0.105745
| 0.046628
| 0
| 0.009539
| 0.194062
| 3,772
| 150
| 73
| 25.146667
| 0.779934
| 0.060445
| 0
| 0.214286
| 0
| 0
| 0.091655
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.071429
| 0.089286
| 0.491071
| 0.008929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad59b00bcc766f57088e62e448110d102b95431
| 17,165
|
py
|
Python
|
doc/tutorial/using_gpu_solution_1.py
|
abdalazizrashid/Theano-PyMC
|
90fa750461e91fb6281d494ae86404e2153fd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
doc/tutorial/using_gpu_solution_1.py
|
abdalazizrashid/Theano-PyMC
|
90fa750461e91fb6281d494ae86404e2153fd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
doc/tutorial/using_gpu_solution_1.py
|
abdalazizrashid/Theano-PyMC
|
90fa750461e91fb6281d494ae86404e2153fd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Aesara tutorial
# Solution to Exercise in section 'Using the GPU'
# 1. Raw results
import numpy as np
import aesara
import aesara.tensor as tt
aesara.config.floatX = "float32"
rng = np.random
N = 400
feats = 784
D = (
rng.randn(N, feats).astype(aesara.config.floatX),
rng.randint(size=N, low=0, high=2).astype(aesara.config.floatX),
)
training_steps = 10000
# Declare Aesara symbolic variables
x = aesara.shared(D[0], name="x")
y = aesara.shared(D[1], name="y")
w = aesara.shared(rng.randn(feats).astype(aesara.config.floatX), name="w")
b = aesara.shared(np.asarray(0.0, dtype=aesara.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
# print "Initial model:"
# print w.get_value(), b.get_value()
# Construct Aesara expression graph
p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b)) # Probability of having a one
prediction = p_1 > 0.5 # The prediction that is done: 0 or 1
xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1) # Cross-entropy
cost = tt.cast(xent.mean(), "float32") + 0.01 * (w ** 2).sum() # The cost to optimize
gw, gb = tt.grad(cost, [w, b])
# Compile expressions to functions
train = aesara.function(
inputs=[],
outputs=[prediction, xent],
updates=[(w, w - 0.01 * gw), (b, b - 0.01 * gb)],
name="train",
)
predict = aesara.function(inputs=[], outputs=prediction, name="predict")
if any(
[
n.op.__class__.__name__ in ["Gemv", "CGemv", "Gemm", "CGemm"]
for n in train.maker.fgraph.toposort()
]
):
print("Used the cpu")
elif any(
[
n.op.__class__.__name__ in ["GpuGemm", "GpuGemv"]
for n in train.maker.fgraph.toposort()
]
):
print("Used the gpu")
else:
print("ERROR, not able to tell if aesara used the cpu or the gpu")
print(train.maker.fgraph.toposort())
for i in range(training_steps):
pred, err = train()
# print "Final model:"
# print w.get_value(), b.get_value()
print("target values for D")
print(D[1])
print("prediction on D")
print(predict())
"""
# 2. Profiling
# 2.1 Profiling for CPU computations
# In your terminal, type:
$ THEANO_FLAGS=profile=True,device=cpu python using_gpu_solution_1.py
# You'll see first the output of the script:
Used the cpu
target values for D
prediction on D
# Followed by the output of profiling. You'll see profiling results for each function
# in the script, followed by a summary for all functions.
# We'll show here only the summary:
Results were produced using an Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz
Function profiling
==================
Message: Sum of all(2) printed profiles at exit excluding Scan op profile.
Time in 10001 calls to Function.__call__: 1.300452e+00s
Time in Function.fn.__call__: 1.215823e+00s (93.492%)
Time in thunks: 1.157602e+00s (89.015%)
Total compile time: 8.922548e-01s
Number of Apply nodes: 17
Aesara Optimizer time: 6.270301e-01s
Aesara validate time: 5.993605e-03s
Aesara Linker time (includes C, CUDA code generation/compiling): 2.949309e-02s
Import time 3.543139e-03s
Time in all call to aesara.grad() 1.848292e-02s
Time since aesara import 2.864s
Class
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name>
64.5% 64.5% 0.747s 3.73e-05s C 20001 3 aesara.tensor.blas_c.CGemv
33.1% 97.7% 0.384s 4.79e-06s C 80001 9 aesara.tensor.elemwise.Elemwise
1.0% 98.6% 0.011s 1.14e-06s C 10000 1 aesara.tensor.elemwise.Sum
0.7% 99.4% 0.009s 2.85e-07s C 30001 4 aesara.tensor.elemwise.DimShuffle
0.3% 99.7% 0.004s 3.64e-07s C 10001 2 aesara.tensor.basic.AllocEmpty
0.3% 100.0% 0.004s 1.78e-07s C 20001 3 aesara.compile.ops.Shape_i
... (remaining 0 Classes account for 0.00%(0.00s) of the runtime)
Ops
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name>
64.5% 64.5% 0.747s 3.73e-05s C 20001 3 CGemv{inplace}
18.7% 83.2% 0.217s 2.17e-05s C 10000 1 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)]
8.9% 92.1% 0.103s 1.03e-05s C 10000 1 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)]
4.3% 96.4% 0.050s 4.98e-06s C 10000 1 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}}
1.0% 97.4% 0.011s 1.14e-06s C 10000 1 Sum{acc_dtype=float64}
0.5% 97.9% 0.006s 2.83e-07s C 20001 3 InplaceDimShuffle{x}
0.4% 98.3% 0.004s 4.22e-07s C 10000 1 Elemwise{sub,no_inplace}
0.3% 98.6% 0.004s 3.70e-07s C 10000 1 Elemwise{neg,no_inplace}
0.3% 98.9% 0.004s 3.64e-07s C 10001 2 AllocEmpty{dtype='float32'}
0.3% 99.2% 0.004s 1.78e-07s C 20001 3 Shape_i{0}
0.2% 99.5% 0.003s 2.88e-07s C 10000 1 InplaceDimShuffle{1,0}
0.2% 99.7% 0.003s 2.65e-07s C 10000 1 Elemwise{Composite{((-i0) - i1)}}[(0, 0)]
0.2% 99.9% 0.002s 1.98e-07s C 10000 1 Elemwise{Cast{float32}}
0.1% 100.0% 0.002s 1.54e-07s C 10000 1 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]
0.0% 100.0% 0.000s 4.77e-06s C 1 1 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}
... (remaining 0 Ops account for 0.00%(0.00s) of the runtime)
Apply
------
<% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name>
34.0% 34.0% 0.394s 3.94e-05s 10000 7 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0})
30.5% 64.5% 0.353s 3.53e-05s 10000 15 CGemv{inplace}(w, TensorConstant{-0.00999999977648}, x.T, Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0, TensorConstant{0.999800026417})
18.7% 83.2% 0.217s 2.17e-05s 10000 12 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)](y, Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, Elemwise{sub,no_inplace}.0, Elemwise{neg,no_inplace}.0)
8.9% 92.1% 0.103s 1.03e-05s 10000 13 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)](Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, y, Elemwise{Cast{float32}}.0, Elemwise{sub,no_inplace}.0)
4.3% 96.4% 0.050s 4.98e-06s 10000 11 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}}(Elemwise{neg,no_inplace}.0, TensorConstant{(1,) of 0.5})
1.0% 97.4% 0.011s 1.14e-06s 10000 14 Sum{acc_dtype=float64}(Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0)
0.4% 97.8% 0.004s 4.22e-07s 10000 4 Elemwise{sub,no_inplace}(TensorConstant{(1,) of 1.0}, y)
0.3% 98.1% 0.004s 3.76e-07s 10000 0 InplaceDimShuffle{x}(b)
0.3% 98.4% 0.004s 3.70e-07s 10000 10 Elemwise{neg,no_inplace}(Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0)
0.3% 98.7% 0.004s 3.64e-07s 10000 5 AllocEmpty{dtype='float32'}(Shape_i{0}.0)
0.2% 99.0% 0.003s 2.88e-07s 10000 2 InplaceDimShuffle{1,0}(x)
0.2% 99.2% 0.003s 2.65e-07s 10000 9 Elemwise{Composite{((-i0) - i1)}}[(0, 0)](CGemv{inplace}.0, InplaceDimShuffle{x}.0)
0.2% 99.4% 0.002s 2.21e-07s 10000 1 Shape_i{0}(x)
0.2% 99.6% 0.002s 1.98e-07s 10000 8 Elemwise{Cast{float32}}(InplaceDimShuffle{x}.0)
0.2% 99.7% 0.002s 1.90e-07s 10000 6 InplaceDimShuffle{x}(Shape_i{0}.0)
0.1% 99.9% 0.002s 1.54e-07s 10000 16 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)](b, TensorConstant{0.00999999977648}, Sum{acc_dtype=float64}.0)
0.1% 100.0% 0.001s 1.34e-07s 10000 3 Shape_i{0}(y)
0.0% 100.0% 0.000s 3.89e-05s 1 3 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0})
0.0% 100.0% 0.000s 4.77e-06s 1 4 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}(CGemv{inplace}.0, InplaceDimShuffle{x}.0, TensorConstant{(1,) of 0.5})
0.0% 100.0% 0.000s 1.19e-06s 1 0 InplaceDimShuffle{x}(b)
... (remaining 2 Apply instances account for 0.00%(0.00s) of the runtime)
# 2.2 Profiling for GPU computations
# In your terminal, type:
$ CUDA_LAUNCH_BLOCKING=1 THEANO_FLAGS=profile=True,device=cuda python using_gpu_solution_1.py
# You'll see first the output of the script:
Used the gpu
target values for D
prediction on D
Results were produced using a GeForce GTX TITAN X
# Profiling summary for all functions:
Function profiling
==================
Message: Sum of all(2) printed profiles at exit excluding Scan op profile.
Time in 10001 calls to Function.__call__: 4.181247e+00s
Time in Function.fn.__call__: 4.081113e+00s (97.605%)
Time in thunks: 3.915566e+00s (93.646%)
Total compile time: 9.256095e+00s
Number of Apply nodes: 21
Aesara Optimizer time: 9.996419e-01s
Aesara validate time: 6.523132e-03s
Aesara Linker time (includes C, CUDA code generation/compiling): 8.239602e+00s
Import time 4.228115e-03s
Time in all call to aesara.grad() 3.286195e-02s
Time since aesara import 15.415s
Class
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name>
59.5% 59.5% 2.329s 1.16e-04s C 20001 3 aesara.sandbox.gpuarray.blas.GpuGemv
29.8% 89.3% 1.166s 1.30e-05s C 90001 10 aesara.sandbox.gpuarray.elemwise.GpuElemwise
4.1% 93.4% 0.162s 8.10e-06s C 20001 3 aesara.sandbox.gpuarray.basic_ops.HostFromGpu
3.3% 96.7% 0.131s 1.31e-05s C 10000 1 aesara.sandbox.gpuarray.elemwise.GpuCAReduceCuda
1.6% 98.3% 0.061s 6.10e-06s C 10000 1 aesara.sandbox.gpuarray.basic_ops.GpuFromHost
0.8% 99.1% 0.033s 1.09e-06s C 30001 4 aesara.sandbox.gpuarray.elemwise.GpuDimShuffle
0.7% 99.8% 0.026s 2.59e-06s C 10001 2 aesara.sandbox.gpuarray.basic_ops.GpuAllocEmpty
0.2% 100.0% 0.008s 3.95e-07s C 20001 3 aesara.compile.ops.Shape_i
... (remaining 0 Classes account for 0.00%(0.00s) of the runtime)
Ops
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name>
59.5% 59.5% 2.329s 1.16e-04s C 20001 3 GpuGemv{inplace=True}
4.1% 63.6% 0.162s 8.10e-06s C 20001 3 HostFromGpu(gpuarray)
4.0% 67.6% 0.157s 1.57e-05s C 10000 1 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>
3.8% 71.4% 0.149s 1.49e-05s C 10000 1 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>
3.7% 75.1% 0.144s 1.44e-05s C 10000 1 GpuElemwise{sub,no_inplace}
3.6% 78.7% 0.141s 1.41e-05s C 10000 1 GpuElemwise{gt,no_inplace}
3.4% 82.1% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Cast{float32}}[]<gpuarray>
3.4% 85.5% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>
3.3% 88.8% 0.131s 1.31e-05s C 10000 1 GpuCAReduceCuda{add}
2.9% 91.7% 0.112s 1.12e-05s C 10000 1 GpuElemwise{neg,no_inplace}
2.6% 94.3% 0.102s 1.02e-05s C 10000 1 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray>
2.5% 96.7% 0.096s 9.63e-06s C 10000 1 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>
1.6% 98.3% 0.061s 6.10e-06s C 10000 1 GpuFromHost<None>
0.7% 99.0% 0.026s 2.59e-06s C 10001 2 GpuAllocEmpty{dtype='float32', context_name=None}
0.5% 99.5% 0.021s 1.06e-06s C 20001 3 InplaceGpuDimShuffle{x}
0.3% 99.8% 0.011s 1.14e-06s C 10000 1 InplaceGpuDimShuffle{1,0}
0.2% 100.0% 0.008s 3.95e-07s C 20001 3 Shape_i{0}
0.0% 100.0% 0.000s 2.00e-05s C 1 1 GpuElemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}[]<gpuarray>
... (remaining 0 Ops account for 0.00%(0.00s) of the runtime)
Apply
------
<% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name>
55.0% 55.0% 2.154s 2.15e-04s 10000 7 GpuGemv{inplace=True}(GpuAllocEmpty{dtype='float32', context_name=None}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0})
4.5% 59.5% 0.176s 1.76e-05s 10000 18 GpuGemv{inplace=True}(w, TensorConstant{-0.00999999977648}, InplaceGpuDimShuffle{1,0}.0, GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0, TensorConstant{0.999800026417})
4.0% 63.5% 0.157s 1.57e-05s 10000 12 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>(y, GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, GpuElemwise{sub,no_inplace}.0, GpuElemwise{neg,no_inplace}.0)
3.8% 67.3% 0.149s 1.49e-05s 10000 15 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>(GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, y, GpuElemwise{Cast{float32}}[]<gpuarray>.0, GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuElemwise{sub,no_inplace}.0)
3.7% 71.0% 0.144s 1.44e-05s 10000 4 GpuElemwise{sub,no_inplace}(GpuArrayConstant{[ 1.]}, y)
3.6% 74.6% 0.141s 1.41e-05s 10000 16 GpuElemwise{gt,no_inplace}(GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[ 0.5]})
3.4% 78.0% 0.133s 1.33e-05s 10000 10 GpuElemwise{Cast{float32}}[]<gpuarray>(InplaceGpuDimShuffle{x}.0)
3.4% 81.4% 0.133s 1.33e-05s 10000 9 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>(GpuGemv{inplace=True}.0, InplaceGpuDimShuffle{x}.0)
3.3% 84.7% 0.131s 1.31e-05s 10000 17 GpuCAReduceCuda{add}(GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0)
2.9% 87.5% 0.112s 1.12e-05s 10000 11 GpuElemwise{neg,no_inplace}(GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0)
2.6% 90.1% 0.102s 1.02e-05s 10000 20 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray>(b, GpuArrayConstant{0.00999999977648}, GpuCAReduceCuda{add}.0)
2.5% 92.6% 0.096s 9.63e-06s 10000 13 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>(GpuElemwise{neg,no_inplace}.0)
2.3% 94.9% 0.090s 9.04e-06s 10000 19 HostFromGpu(gpuarray)(GpuElemwise{gt,no_inplace}.0)
1.8% 96.7% 0.072s 7.16e-06s 10000 14 HostFromGpu(gpuarray)(GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>.0)
1.6% 98.3% 0.061s 6.10e-06s 10000 6 GpuFromHost<None>(Shape_i{0}.0)
0.7% 99.0% 0.026s 2.59e-06s 10000 5 GpuAllocEmpty{dtype='float32', context_name=None}(Shape_i{0}.0)
0.3% 99.3% 0.013s 1.33e-06s 10000 0 InplaceGpuDimShuffle{x}(b)
0.3% 99.6% 0.011s 1.14e-06s 10000 2 InplaceGpuDimShuffle{1,0}(x)
0.2% 99.8% 0.008s 7.94e-07s 10000 8 InplaceGpuDimShuffle{x}(GpuFromHost<None>.0)
0.1% 99.9% 0.005s 5.27e-07s 10000 1 Shape_i{0}(x)
... (remaining 7 Apply instances account for 0.07%(0.00s) of the runtime)
# 3. Conclusions
Examine and compare the 'Ops' summaries for the CPU and GPU runs. The transfer ops
'GpuFromHost' and 'HostFromGpu' usually consume a large amount of extra time on their own,
but you can minimize their overhead by keeping the number of data transfers between GPU and CPU
as small as possible.
Notice that each GPU op takes more time than its CPU counterpart. This is because the ops
operate on small inputs; if you increase the input data size (e.g. set N = 4000), you will see
a gain from using the GPU.
"""
| 62.192029
| 374
| 0.579435
| 2,707
| 17,165
| 3.629479
| 0.16956
| 0.015878
| 0.017812
| 0.029415
| 0.567226
| 0.45028
| 0.381069
| 0.344733
| 0.27888
| 0.250483
| 0
| 0.197341
| 0.268162
| 17,165
| 275
| 375
| 62.418182
| 0.584779
| 0.024061
| 0
| 0.075472
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.056604
| 0
| 0.056604
| 0.150943
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad5dcf7e9f96dc2d1c33142dc858481b208540e
| 1,242
|
py
|
Python
|
chainercv/transforms/bbox/translate_bbox.py
|
souravsingh/chainercv
|
8f76510472bc95018c183e72f37bc6c34a89969c
|
[
"MIT"
] | 1
|
2018-08-24T02:28:31.000Z
|
2018-08-24T02:28:31.000Z
|
chainercv/transforms/bbox/translate_bbox.py
|
souravsingh/chainercv
|
8f76510472bc95018c183e72f37bc6c34a89969c
|
[
"MIT"
] | null | null | null |
chainercv/transforms/bbox/translate_bbox.py
|
souravsingh/chainercv
|
8f76510472bc95018c183e72f37bc6c34a89969c
|
[
"MIT"
] | 2
|
2019-12-16T02:20:26.000Z
|
2022-01-17T02:00:49.000Z
|
def translate_bbox(bbox, y_offset=0, x_offset=0):
"""Translate bounding boxes.
This method is mainly used together with image transforms, such as padding
and cropping, which translates the left top point of the image from
coordinate :math:`(0, 0)` to coordinate
:math:`(y, x) = (y_{offset}, x_{offset})`.
The bounding boxes are expected to be packed into a two dimensional
tensor of shape :math:`(R, 4)`, where :math:`R` is the number of
bounding boxes in the image. The second axis represents attributes of
the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,
where the four attributes are coordinates of the top left and the
bottom right vertices.
Args:
bbox (~numpy.ndarray): Bounding boxes to be transformed. The shape is
:math:`(R, 4)`. :math:`R` is the number of bounding boxes.
y_offset (int or float): The offset along y axis.
x_offset (int or float): The offset along x axis.
Returns:
~numpy.ndarray:
Bounding boxes translated according to the given offsets.
"""
out_bbox = bbox.copy()
out_bbox[:, :2] += (y_offset, x_offset)
out_bbox[:, 2:] += (y_offset, x_offset)
return out_bbox
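# Editor's sketch, not from the original file: a minimal usage example, e.g. after padding
# an image by 10 pixels on both the top and the left.
import numpy as np
bbox = np.array([[0., 0., 32., 48.],
                 [16., 24., 64., 96.]], dtype=np.float32)  # (R, 4) as (y_min, x_min, y_max, x_max)
shifted = translate_bbox(bbox, y_offset=10, x_offset=10)
# shifted[0] is now [10., 10., 42., 58.]; the input array is untouched because the
# function works on a copy.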
| 37.636364
| 78
| 0.65942
| 190
| 1,242
| 4.210526
| 0.415789
| 0.0975
| 0.03
| 0.0525
| 0.2075
| 0.2075
| 0.2075
| 0.0775
| 0
| 0
| 0
| 0.008439
| 0.236715
| 1,242
| 32
| 79
| 38.8125
| 0.835443
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ad9fa52c59620d080c895b1dcbcc37ef6f3e407
| 504
|
py
|
Python
|
behave/features/environment.py
|
ministryofjustice/cla-end-to-end-tests
|
3d7e525c17f38403a91087c2b1af460ca1109a9b
|
[
"MIT"
] | 1
|
2022-02-09T13:12:57.000Z
|
2022-02-09T13:12:57.000Z
|
behave/features/environment.py
|
ministryofjustice/cla-end-to-end-tests
|
3d7e525c17f38403a91087c2b1af460ca1109a9b
|
[
"MIT"
] | 3
|
2021-09-16T12:24:44.000Z
|
2022-03-08T10:21:26.000Z
|
behave/features/environment.py
|
ministryofjustice/cla-end-to-end-tests
|
3d7e525c17f38403a91087c2b1af460ca1109a9b
|
[
"MIT"
] | null | null | null |
import os
from configparser import ConfigParser
from helper.helper_web import get_browser
def before_all(context):
config = ConfigParser()
print((os.path.join(os.getcwd(), 'setup.cfg')))
my_file = (os.path.join(os.getcwd(), 'setup.cfg'))
config.read(my_file)
# Reading the browser type from the configuration file
helper_func = get_browser(config.get('Environment', 'Browser'))
context.helperfunc = helper_func
def after_all(context):
context.helperfunc.close()
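# Editor's note: config.get('Environment', 'Browser') above implies that setup.cfg carries
# an [Environment] section. A hypothetical example (the browser value is illustrative only):
#
#   [Environment]
#   Browser = chrome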
| 28
| 67
| 0.71627
| 67
| 504
| 5.253731
| 0.462687
| 0.056818
| 0.056818
| 0.068182
| 0.147727
| 0.147727
| 0.147727
| 0
| 0
| 0
| 0
| 0
| 0.164683
| 504
| 18
| 68
| 28
| 0.836105
| 0.103175
| 0
| 0
| 0
| 0
| 0.079823
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.25
| 0
| 0.416667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ada8f8c31036f868e794a58c29dd691ac89f964
| 2,422
|
py
|
Python
|
recipe_organizer/gui/recipe_list/recipe_source.py
|
j-sommer/recipe-organizer
|
91d39e12c453ecf3d3254645b565bbceacaecde9
|
[
"MIT"
] | null | null | null |
recipe_organizer/gui/recipe_list/recipe_source.py
|
j-sommer/recipe-organizer
|
91d39e12c453ecf3d3254645b565bbceacaecde9
|
[
"MIT"
] | null | null | null |
recipe_organizer/gui/recipe_list/recipe_source.py
|
j-sommer/recipe-organizer
|
91d39e12c453ecf3d3254645b565bbceacaecde9
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from tkinter import Frame, Label
from recipe_organizer.events.event import Event, EventType
from recipe_organizer.events.event_observer import EventObserver
from recipe_organizer.events.event_publisher import EventPublisher
from recipe_organizer.gui.interfaces.widget_container import WidgetContainer
from recipe_organizer.gui.recipe_summary.recipe_summary import RecipeSummary
from recipe_organizer.recipe.recipe import Recipe
class RecipeSource(Frame, WidgetContainer, EventObserver):
_MAX_COLUMN_COUNT = 6
_label_source_directory: Label
_recipe_summaries: [RecipeSummary] = []
_row_index = 0
def __init__(self, parent):
Frame.__init__(self, parent)
self.define_widgets()
self.define_layout()
EventPublisher.add(self)
def define_widgets(self) -> None:
self._label_source_directory = Label(self, text="-")
def define_layout(self) -> None:
self._label_source_directory.grid(row=self.__get_row_index())
def notify(self, event: Event) -> None:
if event.event_type == EventType.SOURCE_SET:
self._label_source_directory.configure(text=event.payload.name)
self.__load_recipes(event.payload)
def __get_row_index(self) -> int:
current_index = self._row_index
self._row_index += 1
return current_index
def __load_recipes(self, directory: Path):
recipes: [Recipe] = []
file_paths = directory.glob("**/*.json")
for file_path in file_paths:
with open(file_path, "r", encoding="utf-8") as file:
json_data = file.read()
try:
recipe = Recipe.from_json(json_data)
except KeyError:
pass
else:
recipes.append(recipe)
self.__create_list(recipes)
def __create_list(self, recipes: [Recipe]):
current_row_index = self.__get_row_index()
for index, recipe in enumerate(recipes):
if index % self._MAX_COLUMN_COUNT == 0:
current_row_index = self.__get_row_index()
recipe_summary = RecipeSummary(self, recipe)
recipe_summary.grid(row=current_row_index, column=index % self._MAX_COLUMN_COUNT, padx=16, pady=10)
self.columnconfigure(index, minsize=200)
self._recipe_summaries.append(recipe_summary)
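# Editor's sketch, not part of the original module: RecipeSource is a tkinter Frame, so a
# minimal, hypothetical way to host it looks like the following. Recipes are only loaded
# once an EventPublisher delivers a SOURCE_SET event carrying the source directory.
from tkinter import Tk
root = Tk()
recipe_source = RecipeSource(root)
recipe_source.pack(fill="both", expand=True)
root.mainloop()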
| 34.112676
| 111
| 0.671346
| 284
| 2,422
| 5.366197
| 0.320423
| 0.052493
| 0.074803
| 0.049213
| 0.170604
| 0.081365
| 0.03937
| 0
| 0
| 0
| 0
| 0.006547
| 0.243187
| 2,422
| 70
| 112
| 34.6
| 0.824877
| 0
| 0
| 0.038462
| 0
| 0
| 0.006606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134615
| false
| 0.019231
| 0.153846
| 0
| 0.403846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ada8fe0ced127e4eb158cbef0bc674aa2bd2da2
| 917
|
py
|
Python
|
var/spack/repos/builtin/packages/spot/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-05-24T15:23:12.000Z
|
2020-05-24T15:23:12.000Z
|
var/spack/repos/builtin/packages/spot/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 6
|
2022-02-26T11:44:34.000Z
|
2022-03-12T12:14:50.000Z
|
var/spack/repos/builtin/packages/spot/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2021-01-06T18:58:26.000Z
|
2021-01-06T18:58:26.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Spot(AutotoolsPackage):
"""Spot is a C++11 library for omega-automata manipulation and model
checking."""
homepage = "https://spot.lrde.epita.fr/"
url = "http://www.lrde.epita.fr/dload/spot/spot-1.99.3.tar.gz"
version('1.99.3', sha256='86964af559994af4451a8dca663a9e1db6e869ed60e747ab60ce72dddc31b61b')
version('1.2.6', sha256='360678c75f6741f697e8e56cdbc9937f104eb723a839c3629f0dc5dc6de11bfc')
variant('python', default=True, description='Enable python API')
depends_on("[email protected]:", when='@1.99.5: +python')
depends_on("[email protected]:", when='@1.99: +python')
depends_on("python@2:", when='+python')
depends_on('boost', when='@:1.2.6')
| 38.208333
| 96
| 0.707743
| 120
| 917
| 5.375
| 0.616667
| 0.018605
| 0.069767
| 0.049612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15443
| 0.138495
| 917
| 23
| 97
| 39.869565
| 0.662025
| 0.288986
| 0
| 0
| 0
| 0.090909
| 0.507862
| 0.201258
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6adce2d4edcce50b7803e777bec26f0e2dbe1ef5
| 8,165
|
py
|
Python
|
GPT-distributed.py
|
wenhuchen/LogicNLG
|
e986516e5b6d310219215510b3fe1603d03215cd
|
[
"MIT"
] | 141
|
2020-04-23T03:30:16.000Z
|
2022-03-19T08:36:31.000Z
|
GPT-distributed.py
|
wenhuchen/LogicNLG
|
e986516e5b6d310219215510b3fe1603d03215cd
|
[
"MIT"
] | 15
|
2020-04-26T07:12:30.000Z
|
2021-06-10T16:40:35.000Z
|
GPT-distributed.py
|
wenhuchen/LogicNLG
|
e986516e5b6d310219215510b3fe1603d03215cd
|
[
"MIT"
] | 20
|
2020-04-27T03:07:10.000Z
|
2022-01-22T22:13:15.000Z
|
import argparse
import logging
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn
from torch.autograd import Variable
from transformers import GPT2Config
from transformers import GPT2LMHeadModel, GPT2Tokenizer, BertTokenizer
from DataLoader import *
from Model import BERTGen
from utils import sample_sequence
import torch.optim as optim
import math
import sys
import pandas
import os
import numpy
import nltk
from torch.utils.tensorboard import SummaryWriter
import warnings
from tqdm import tqdm, trange
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data import DataLoader as DL
import torch
from torch.utils.data.distributed import DistributedSampler
warnings.filterwarnings("ignore", category=UserWarning)
device = torch.device('cuda')
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model", default='gpt2', type=str)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
parser.add_argument('--do_train', default=False, action="store_true", help="whether to train or test the model")
parser.add_argument('--do_rl', default=False, action="store_true", help="whether to train or test the model")
parser.add_argument('--do_val', default=False, action="store_true", help="whether to train or test the model")
parser.add_argument('--do_test', default=False, action="store_true", help="whether to compute the BLEU scores on test split")
parser.add_argument('--do_test_challenge', default=False, action="store_true", help="whether to compute the BLEU scores on challenge split")
parser.add_argument('--do_ppl', default=False, action="store_true", help="whether to compute perplexity of the model")
parser.add_argument('--do_verify', default=False, action="store_true", help="whether compute the adv-acc score on test split")
parser.add_argument('--do_verify_challenge', default=False, action="store_true", help="whether compute the adv-acc score on challenge split")
parser.add_argument('--epoch', default=10, type=int, help="number of training epochs")
parser.add_argument('--batch_size', default=6, type=int, help="training batch size")
parser.add_argument('--local_rank', default=-1, type=int, help="local rank for distributed training (-1 for non-distributed)")
parser.add_argument('--learning_rate', default=2e-6, type=float, help="learning rate for the Adam optimizer")
parser.add_argument('--dataset', default='table', type=str, help="which dataset to train or evaluate on")
parser.add_argument('--every', default=50, type=int, help="log and sample every this many training steps")
parser.add_argument('--load_from', default='', type=str, help="path of a checkpoint to load the model from")
parser.add_argument('--id', default='models', type=str, help="specify the id of the experiment")
parser.add_argument('--max_len', default=800, type=int, help="maximum sequence length")
parser.add_argument('--dim', default=768, type=int, help="hidden dimension of the model")
parser.add_argument('--layers', default=3, type=int, help="number of layers")
parser.add_argument('--head', default=4, type=int, help="number of attention heads")
parser.add_argument("--modelpath", type=str, default="bert-base-uncased",
help="name or path of a pretrained model/tokenizer")
parser.add_argument('--gradient_accumulation_steps', type=int, default=5, help="accumulation steps for gradient")
parser.add_argument('--decode_first_K', type=int, default=10000, help="For debugging purpose")
args = parser.parse_args()
if args.local_rank == -1:
device = torch.device("cuda")
args.n_gpu = 1
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
tokenizer = GPT2Tokenizer.from_pretrained(args.model)
model = GPT2LMHeadModel.from_pretrained(args.model)
#model = nn.DataParallel(model)
model.to(args.device)
if args.local_rank == 0:
torch.distributed.barrier()
criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=-1)
if args.do_train:
if args.local_rank in [-1, 0]:
if not os.path.exists(args.id):
os.mkdir(args.id)
tb_writer = SummaryWriter(log_dir='tensorboard/GPT2-{}'.format(args.model))
dataset = GPTTableDataset2('data/train_lm_preprocessed.json', tokenizer, args.max_len)
if args.local_rank == -1:
sampler = RandomSampler(dataset)
else:
sampler = DistributedSampler(dataset)
train_dataloader = DL(dataset, sampler=sampler, batch_size=args.batch_size, num_workers=0)
model.train()
optimizer = optim.Adam(model.parameters(), args.learning_rate)
avg_loss = 0
global_step = 0
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank, find_unused_parameters=True)
else:
model = torch.nn.DataParallel(model)
for epoch_idx in trange(0, args.epoch, desc='Epoch', disable=args.local_rank not in [-1, 0]):
#for idx in range(0, dataset.train_len()):
for idx, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
batch = tuple(Variable(t).to(device) for t in batch)
trg_inp, trg_out, mask, caption = batch
inputs = torch.cat([caption, trg_inp], 1)
model.zero_grad()
optimizer.zero_grad()
logits = model(inputs)[0]
logits = logits[:, -trg_out.shape[1]:, :].contiguous()
loss = criterion(logits.view(-1, logits.shape[-1]), trg_out.view(-1))
loss = loss * mask.view(-1)
loss = loss.sum() / mask.sum()
avg_loss += loss.item()
loss.backward()
optimizer.step()
global_step += 1
if args.local_rank in [-1, 0] and idx % args.every == 0 and idx > 0:
tb_writer.add_scalar("perplexity", math.exp(avg_loss / args.every), global_step)
fake_inputs = caption
gt_inputs = trg_out.cpu().data.numpy()
#samples = model.sample(fake_inputs, tabfeat, caption, highlight_idx, bert)
samples = sample_sequence(model, 30, fake_inputs, [])
samples = samples[:, caption.shape[1]:]
samples = samples.cpu().data.numpy()
for s, gt in zip(samples, gt_inputs):
text = tokenizer.decode(s, clean_up_tokenization_spaces=True)
text = text[: text.find(tokenizer.eos_token)]
print("PREDICTION |||||| ", text)
text = tokenizer.decode(gt, clean_up_tokenization_spaces=True)
text = text[: text.find(tokenizer.eos_token)]
print("GROUNDTRUH |||||| ",text)
break
avg_loss = 0
if args.local_rank in [-1, 0]:
if args.model == 'gpt2':
torch.save(model.state_dict(), '{}/GPT_ep{}.pt'.format(args.id, epoch_idx))
else:
torch.save(model.state_dict(), '{}/GPT_medium_ep{}.pt'.format(args.id, epoch_idx))
if args.local_rank in [-1, 0]:
tb_writer.close()
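# Editor's note: a hypothetical invocation of the training script above; the flag values
# are illustrative only and mirror the argparse defaults, not a documented command from
# the original repository.
#
#   python GPT-distributed.py --do_train --model gpt2 --batch_size 6 --epoch 10 --id models
#
# For multi-GPU runs the script expects --local_rank to be set (typically by
# torch.distributed.launch), in which case it calls torch.distributed.init_process_group.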
| 47.196532
| 145
| 0.643846
| 1,068
| 8,165
| 4.779026
| 0.23221
| 0.04761
| 0.089929
| 0.049961
| 0.380486
| 0.330525
| 0.3029
| 0.27116
| 0.248824
| 0.239616
| 0
| 0.012314
| 0.23417
| 8,165
| 173
| 146
| 47.196532
| 0.803934
| 0.017759
| 0
| 0.135714
| 0
| 0
| 0.180095
| 0.012721
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007143
| false
| 0
| 0.185714
| 0
| 0.192857
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6addc56efc2458ffaaa37a8a1a9d3060123eac26
| 9,901
|
py
|
Python
|
bentoml/saved_bundle/loader.py
|
niits/BentoML
|
3954f36762e10f5df15af7e0ae6dd71f5f214261
|
[
"Apache-2.0"
] | 3,451
|
2019-04-02T01:47:42.000Z
|
2022-03-31T16:20:49.000Z
|
bentoml/saved_bundle/loader.py
|
niits/BentoML
|
3954f36762e10f5df15af7e0ae6dd71f5f214261
|
[
"Apache-2.0"
] | 1,925
|
2019-04-03T00:19:05.000Z
|
2022-03-31T22:41:54.000Z
|
bentoml/saved_bundle/loader.py
|
niits/BentoML
|
3954f36762e10f5df15af7e0ae6dd71f5f214261
|
[
"Apache-2.0"
] | 451
|
2019-04-02T01:53:41.000Z
|
2022-03-29T08:49:06.000Z
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import sys
import tarfile
import logging
import tempfile
import shutil
from functools import wraps
from contextlib import contextmanager
from urllib.parse import urlparse
from typing import TYPE_CHECKING
from pathlib import PureWindowsPath, PurePosixPath
from bentoml.utils.s3 import is_s3_url
from bentoml.utils.gcs import is_gcs_url
from bentoml.exceptions import BentoMLException
from bentoml.saved_bundle.config import SavedBundleConfig
from bentoml.saved_bundle.pip_pkg import ZIPIMPORT_DIR
if TYPE_CHECKING:
from bentoml.yatai.proto.repository_pb2 import BentoServiceMetadata
logger = logging.getLogger(__name__)
def _is_http_url(bundle_path) -> bool:
try:
return urlparse(bundle_path).scheme in ["http", "https"]
except ValueError:
return False
def _is_remote_path(bundle_path) -> bool:
return isinstance(bundle_path, str) and (
is_s3_url(bundle_path) or is_gcs_url(bundle_path) or _is_http_url(bundle_path)
)
@contextmanager
def _resolve_remote_bundle_path(bundle_path):
if is_s3_url(bundle_path):
import boto3
parsed_url = urlparse(bundle_path)
bucket_name = parsed_url.netloc
object_name = parsed_url.path.lstrip('/')
s3 = boto3.client('s3')
fileobj = io.BytesIO()
s3.download_fileobj(bucket_name, object_name, fileobj)
fileobj.seek(0, 0)
elif is_gcs_url(bundle_path):
try:
from google.cloud import storage
except ImportError:
raise BentoMLException(
'"google-cloud-storage" package is required. You can install it with '
'pip: "pip install google-cloud-storage"'
)
gcs = storage.Client()
fileobj = io.BytesIO()
gcs.download_blob_to_file(bundle_path, fileobj)
fileobj.seek(0, 0)
elif _is_http_url(bundle_path):
import requests
response = requests.get(bundle_path)
if response.status_code != 200:
raise BentoMLException(
f"Error retrieving BentoService bundle. "
f"{response.status_code}: {response.text}"
)
fileobj = io.BytesIO()
fileobj.write(response.content)
fileobj.seek(0, 0)
else:
raise BentoMLException(f"Saved bundle path: '{bundle_path}' is not supported")
with tarfile.open(mode="r:gz", fileobj=fileobj) as tar:
with tempfile.TemporaryDirectory() as tmpdir:
filename = tar.getmembers()[0].name
tar.extractall(path=tmpdir)
yield os.path.join(tmpdir, filename)
def resolve_remote_bundle(func):
"""Decorate a function to handle remote bundles."""
@wraps(func)
def wrapper(bundle_path, *args):
if _is_remote_path(bundle_path):
with _resolve_remote_bundle_path(bundle_path) as local_bundle_path:
return func(local_bundle_path, *args)
return func(bundle_path, *args)
return wrapper
@resolve_remote_bundle
def load_saved_bundle_config(bundle_path) -> "SavedBundleConfig":
try:
return SavedBundleConfig.load(os.path.join(bundle_path, "bentoml.yml"))
except FileNotFoundError:
raise BentoMLException(
"BentoML can't locate config file 'bentoml.yml'"
" in saved bundle in path: {}".format(bundle_path)
)
def load_bento_service_metadata(bundle_path: str) -> "BentoServiceMetadata":
return load_saved_bundle_config(bundle_path).get_bento_service_metadata_pb()
def _find_module_file(bundle_path, service_name, module_file):
# Simply join full path when module_file is just a file name,
# e.g. module_file=="iris_classifier.py"
module_file_path = os.path.join(bundle_path, service_name, module_file)
if not os.path.isfile(module_file_path):
# Try loading without the service_name prefix, for loading from an installed PyPI package
module_file_path = os.path.join(bundle_path, module_file)
# When module_file is located in sub directory
# e.g. module_file=="foo/bar/iris_classifier.py"
# This needs to handle the path differences between posix and windows platform:
if not os.path.isfile(module_file_path):
if sys.platform == "win32":
# Try loading a saved bundle created on a posix platform while running on windows
module_file_path = os.path.join(
bundle_path, service_name, str(PurePosixPath(module_file))
)
if not os.path.isfile(module_file_path):
module_file_path = os.path.join(
bundle_path, str(PurePosixPath(module_file))
)
else:
# Try loading a saved bundle created on a windows platform while running on posix
module_file_path = os.path.join(
bundle_path, service_name, PureWindowsPath(module_file).as_posix()
)
if not os.path.isfile(module_file_path):
module_file_path = os.path.join(
bundle_path, PureWindowsPath(module_file).as_posix()
)
if not os.path.isfile(module_file_path):
raise BentoMLException(
"Can not locate module_file {} in saved bundle {}".format(
module_file, bundle_path
)
)
return module_file_path
@resolve_remote_bundle
def load_bento_service_class(bundle_path):
"""
Load a BentoService class from a saved bundle in the given path
:param bundle_path: A path to Bento files generated from BentoService#save,
#save_to_dir, or the path to a pip-installed BentoService directory
:return: BentoService class
"""
config = load_saved_bundle_config(bundle_path)
metadata = config["metadata"]
# Find and load target module containing BentoService class from given path
module_file_path = _find_module_file(
bundle_path, metadata["service_name"], metadata["module_file"]
)
# Prepend bundle_path to sys.path for loading extra python dependencies
sys.path.insert(0, bundle_path)
sys.path.insert(0, os.path.join(bundle_path, metadata["service_name"]))
# Include zipimport modules
zipimport_dir = os.path.join(bundle_path, metadata["service_name"], ZIPIMPORT_DIR)
if os.path.exists(zipimport_dir):
for p in os.listdir(zipimport_dir):
logger.debug('adding %s to sys.path', p)
sys.path.insert(0, os.path.join(zipimport_dir, p))
module_name = metadata["module_name"]
if module_name in sys.modules:
logger.warning(
"Module `%s` already loaded, using existing imported module.", module_name
)
module = sys.modules[module_name]
elif sys.version_info >= (3, 5):
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info >= (3, 3):
from importlib.machinery import SourceFileLoader
# pylint:disable=deprecated-method
module = SourceFileLoader(module_name, module_file_path).load_module(
module_name
)
# pylint:enable=deprecated-method
else:
raise BentoMLException("BentoML requires Python 3.4 and above")
# Remove bundle_path from sys.path to avoid import naming conflicts
sys.path.remove(bundle_path)
model_service_class = module.__getattribute__(metadata["service_name"])
# Set _bento_service_bundle_path, where BentoService will load its artifacts
model_service_class._bento_service_bundle_path = bundle_path
# Set cls._version, service instance can access it via svc.version
model_service_class._bento_service_bundle_version = metadata["service_version"]
if (
model_service_class._env
and model_service_class._env._requirements_txt_file is not None
):
# Load `requirements.txt` from the bundle directory instead of the user-provided
# file path, which may only be available during the bundle save process
model_service_class._env._requirements_txt_file = os.path.join(
bundle_path, "requirements.txt"
)
return model_service_class
@resolve_remote_bundle
def safe_retrieve(bundle_path: str, target_dir: str):
"""Safely retrieve bento service to local path
Args:
bundle_path (:obj:`str`):
The path that contains saved BentoService bundle, supporting
both local file path and s3 path
target_dir (:obj:`str`):
Where the service contents should end up.
Returns:
:obj:`str`: location of safe local path
"""
return shutil.copytree(bundle_path, target_dir)
@resolve_remote_bundle
def load_from_dir(bundle_path):
"""Load bento service from local file path or s3 path
Args:
bundle_path (str): The path that contains saved BentoService bundle,
supporting both local file path and s3 path
Returns:
bentoml.service.BentoService: a loaded BentoService instance
"""
svc_cls = load_bento_service_class(bundle_path)
return svc_cls()
@resolve_remote_bundle
def load_bento_service_api(bundle_path, api_name=None):
bento_service = load_from_dir(bundle_path)
return bento_service.get_inference_api(api_name)
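# Editor's sketch, not part of the original module: how the loaders above are typically
# combined. The bundle path below is hypothetical; per the helpers above it may be a local
# directory, an s3:// or gs:// URI, or an http(s) URL pointing at a .tar.gz bundle.
svc = load_from_dir("s3://my-bucket/IrisClassifier/bundle.tar.gz")
api = svc.get_inference_api(None)  # the same call made by load_bento_service_api above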
| 35.487455
| 86
| 0.692051
| 1,287
| 9,901
| 5.094017
| 0.24087
| 0.089994
| 0.032032
| 0.024405
| 0.269372
| 0.202257
| 0.155583
| 0.109213
| 0.087401
| 0.087401
| 0
| 0.005518
| 0.23129
| 9,901
| 278
| 87
| 35.615108
| 0.855867
| 0.248965
| 0
| 0.174419
| 0
| 0
| 0.089266
| 0.009036
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.180233
| 0.011628
| 0.319767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6adecc40e2158fa98b341e37dfb8d034335bed2b
| 1,267
|
py
|
Python
|
gen-post.py
|
younghk/younghk.netlify.com
|
605ab089252127c0b768d31afb027e8896ae33b4
|
[
"MIT"
] | null | null | null |
gen-post.py
|
younghk/younghk.netlify.com
|
605ab089252127c0b768d31afb027e8896ae33b4
|
[
"MIT"
] | null | null | null |
gen-post.py
|
younghk/younghk.netlify.com
|
605ab089252127c0b768d31afb027e8896ae33b4
|
[
"MIT"
] | null | null | null |
import os
import errno
from datetime import datetime
print("Generating A New Post\n")
post_name = input('Input Post Name: ')
date_time = datetime.now()
date_time_dir = date_time.strftime("%Y-%m-%d")
date_time_post = date_time.strftime("%Y-%m-%d %H:%M:%S")
p_name = post_name.replace(" ","-")
p_name = p_name.replace("[","")
p_name = p_name.replace("]","")
p_name = p_name.lower()
f_name = date_time_dir+"---"+p_name
dir = "./src/pages/articles/"+f_name+"/"
f_dir = dir+f_name+".md"
try:
if not(os.path.isdir(dir)):
os.makedirs(os.path.join(dir))
except OSError as e:
if e.errno != errno.EEXIST:
print("Failed to create directory!!!!!")
raise
print("Generating post : ",f_dir)
with open(f_dir, 'w') as f:
f.write('---')
f.write('\n')
f.write('draft: true')
f.write('\n')
f.write('title: \"'+post_name+'\"')
f.write('\n')
f.write('date: \"'+date_time_post+'\"')
f.write('\n')
f.write('layout: post')
f.write('\n')
f.write('path: \"/posts/'+p_name+'/\"')
f.write('\n')
f.write('category: \"\"')
f.write('\n')
f.write('tags: ')
f.write('\n')
f.write('description: ""')
f.write('\n')
f.write('---')
f.write('\n')
print("Done :)")
| 23.462963
| 56
| 0.561168
| 194
| 1,267
| 3.515464
| 0.314433
| 0.175953
| 0.102639
| 0.105572
| 0.35044
| 0.23607
| 0.080645
| 0.080645
| 0.080645
| 0.080645
| 0
| 0
| 0.195738
| 1,267
| 54
| 57
| 23.462963
| 0.669284
| 0
| 0
| 0.266667
| 0
| 0
| 0.235804
| 0.016562
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ae0041ec06abb5f41acc8d9e0ad54c9727be449
| 39,758
|
py
|
Python
|
rmgpy/reactionTest.py
|
Lyle-zhang/RMG-Py
|
273eb51fa3c175562056c85d7d61814d5fa2986d
|
[
"MIT"
] | null | null | null |
rmgpy/reactionTest.py
|
Lyle-zhang/RMG-Py
|
273eb51fa3c175562056c85d7d61814d5fa2986d
|
[
"MIT"
] | null | null | null |
rmgpy/reactionTest.py
|
Lyle-zhang/RMG-Py
|
273eb51fa3c175562056c85d7d61814d5fa2986d
|
[
"MIT"
] | 1
|
2021-08-14T13:47:18.000Z
|
2021-08-14T13:47:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains unit tests of the rmgpy.reaction module.
"""
import numpy
import unittest
from external.wip import work_in_progress
from rmgpy.species import Species, TransitionState
from rmgpy.reaction import Reaction
from rmgpy.statmech.translation import Translation, IdealGasTranslation
from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
from rmgpy.statmech.vibration import Vibration, HarmonicOscillator
from rmgpy.statmech.torsion import Torsion, HinderedRotor
from rmgpy.statmech.conformer import Conformer
from rmgpy.kinetics import Arrhenius
from rmgpy.thermo import Wilhoit
import rmgpy.constants as constants
################################################################################
class PseudoSpecies:
"""
Can be used in place of a :class:`rmgpy.species.Species` for isomorphism checks.
PseudoSpecies('a') is isomorphic with PseudoSpecies('A')
but nothing else.
"""
def __init__(self, label):
self.label = label
def __repr__(self):
return "PseudoSpecies('{0}')".format(self.label)
def __str__(self):
return self.label
def isIsomorphic(self, other):
return self.label.lower() == other.label.lower()
class TestReactionIsomorphism(unittest.TestCase):
"""
Contains unit tests of the isomorphism testing of the Reaction class.
"""
def makeReaction(self,reaction_string):
""""
Make a Reaction (containing PseudoSpecies) of from a string like 'Ab=CD'
"""
reactants, products = reaction_string.split('=')
reactants = [PseudoSpecies(i) for i in reactants]
products = [PseudoSpecies(i) for i in products]
return Reaction(reactants=reactants, products=products)
def test1to1(self):
r1 = self.makeReaction('A=B')
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=B')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('b=A')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('A=C')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('A=BB')))
def test1to2(self):
r1 = self.makeReaction('A=BC')
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=Bc')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('cb=a')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('a=c')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=c')))
def test2to2(self):
r1 = self.makeReaction('AB=CD')
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cd')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'),eitherDirection=False))
self.assertTrue(r1.isIsomorphic(self.makeReaction('dc=ba')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=ab')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=cde')))
def test2to3(self):
r1 = self.makeReaction('AB=CDE')
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cde')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'),eitherDirection=False))
self.assertTrue(r1.isIsomorphic(self.makeReaction('dec=ba')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=abc')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('abe=cde')))
class TestReaction(unittest.TestCase):
"""
Contains unit tests of the Reaction class.
"""
def setUp(self):
"""
A method that is called prior to each unit test in this class.
"""
ethylene = Species(
label = 'C2H4',
conformer = Conformer(
E0 = (44.7127, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (28.0313, 'amu'),
),
NonlinearRotor(
inertia = (
[3.41526, 16.6498, 20.065],
'amu*angstrom^2',
),
symmetry = 4,
),
HarmonicOscillator(
frequencies = (
[828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54],
'cm^-1',
),
),
],
spinMultiplicity = 1,
opticalIsomers = 1,
),
)
hydrogen = Species(
label = 'H',
conformer = Conformer(
E0 = (211.794, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (1.00783, 'amu'),
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
)
ethyl = Species(
label = 'C2H5',
conformer = Conformer(
E0 = (111.603, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (29.0391, 'amu'),
),
NonlinearRotor(
inertia = (
[4.8709, 22.2353, 23.9925],
'amu*angstrom^2',
),
symmetry = 1,
),
HarmonicOscillator(
frequencies = (
[482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73],
'cm^-1',
),
),
HinderedRotor(
inertia = (1.11481, 'amu*angstrom^2'),
symmetry = 6,
barrier = (0.244029, 'kJ/mol'),
semiclassical = None,
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
)
TS = TransitionState(
label = 'TS',
conformer = Conformer(
E0 = (266.694, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (29.0391, 'amu'),
),
NonlinearRotor(
inertia = (
[6.78512, 22.1437, 22.2114],
'amu*angstrom^2',
),
symmetry = 1,
),
HarmonicOscillator(
frequencies = (
[412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88],
'cm^-1',
),
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
frequency = (-750.232, 'cm^-1'),
)
self.reaction = Reaction(
reactants = [hydrogen, ethylene],
products = [ethyl],
kinetics = Arrhenius(
A = (501366000.0, 'cm^3/(mol*s)'),
n = 1.637,
Ea = (4.32508, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2500, 'K'),
),
transitionState = TS,
)
# CC(=O)O[O]
acetylperoxy = Species(
label='acetylperoxy',
thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(21.0*constants.R,"J/(mol*K)"), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0,"K"), H0=(-6.151e+04,"J/mol"), S0=(-790.2,"J/(mol*K)")),
)
# C[C]=O
acetyl = Species(
label='acetyl',
thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(15.5*constants.R,"J/(mol*K)"), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0,"K"), H0=(-1.439e+05,"J/mol"), S0=(-524.6,"J/(mol*K)")),
)
# [O][O]
oxygen = Species(
label='oxygen',
thermo=Wilhoit(Cp0=(3.5*constants.R,"J/(mol*K)"), CpInf=(4.5*constants.R,"J/(mol*K)"), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0,"K"), H0=(1.453e+04,"J/mol"), S0=(-12.19,"J/(mol*K)")),
)
self.reaction2 = Reaction(
reactants=[acetyl, oxygen],
products=[acetylperoxy],
kinetics = Arrhenius(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
Ea = (0.0, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
),
)
def testIsIsomerization(self):
"""
Test the Reaction.isIsomerization() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertTrue(isomerization.isIsomerization())
self.assertFalse(association.isIsomerization())
self.assertFalse(dissociation.isIsomerization())
self.assertFalse(bimolecular.isIsomerization())
def testIsAssociation(self):
"""
Test the Reaction.isAssociation() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertFalse(isomerization.isAssociation())
self.assertTrue(association.isAssociation())
self.assertFalse(dissociation.isAssociation())
self.assertFalse(bimolecular.isAssociation())
def testIsDissociation(self):
"""
Test the Reaction.isDissociation() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertFalse(isomerization.isDissociation())
self.assertFalse(association.isDissociation())
self.assertTrue(dissociation.isDissociation())
self.assertFalse(bimolecular.isDissociation())
def testHasTemplate(self):
"""
Test the Reaction.hasTemplate() method.
"""
reactants = self.reaction.reactants[:]
products = self.reaction.products[:]
self.assertTrue(self.reaction.hasTemplate(reactants, products))
self.assertTrue(self.reaction.hasTemplate(products, reactants))
self.assertFalse(self.reaction2.hasTemplate(reactants, products))
self.assertFalse(self.reaction2.hasTemplate(products, reactants))
reactants.reverse()
products.reverse()
self.assertTrue(self.reaction.hasTemplate(reactants, products))
self.assertTrue(self.reaction.hasTemplate(products, reactants))
self.assertFalse(self.reaction2.hasTemplate(reactants, products))
self.assertFalse(self.reaction2.hasTemplate(products, reactants))
reactants = self.reaction2.reactants[:]
products = self.reaction2.products[:]
self.assertFalse(self.reaction.hasTemplate(reactants, products))
self.assertFalse(self.reaction.hasTemplate(products, reactants))
self.assertTrue(self.reaction2.hasTemplate(reactants, products))
self.assertTrue(self.reaction2.hasTemplate(products, reactants))
reactants.reverse()
products.reverse()
self.assertFalse(self.reaction.hasTemplate(reactants, products))
self.assertFalse(self.reaction.hasTemplate(products, reactants))
self.assertTrue(self.reaction2.hasTemplate(reactants, products))
self.assertTrue(self.reaction2.hasTemplate(products, reactants))
def testEnthalpyOfReaction(self):
"""
Test the Reaction.getEnthalpyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Hlist0 = [float(v) for v in ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', '-129316']]
Hlist = self.reaction2.getEnthalpiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Hlist[i] / 1000., Hlist0[i] / 1000., 2)
def testEntropyOfReaction(self):
"""
Test the Reaction.getEntropyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Slist0 = [float(v) for v in ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', '-141.407', '-140.441']]
Slist = self.reaction2.getEntropiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Slist[i], Slist0[i], 2)
def testFreeEnergyOfReaction(self):
"""
Test the Reaction.getFreeEnergyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Glist0 = [float(v) for v in ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', '151565']]
Glist = self.reaction2.getFreeEnergiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Glist[i] / 1000., Glist0[i] / 1000., 2)
def testEquilibriumConstantKa(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kalist0 = [float(v) for v in ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', '0.000792389', '0.000262777', '0.000110053']]
Kalist = self.reaction2.getEquilibriumConstants(Tlist, type='Ka')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kalist[i] / Kalist0[i], 1.0, 4)
def testEquilibriumConstantKc(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kclist0 = [float(v) for v in ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', '0.000105413', '3.93273e-05', '1.83006e-05']]
Kclist = self.reaction2.getEquilibriumConstants(Tlist, type='Kc')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kclist[i] / Kclist0[i], 1.0, 4)
def testEquilibriumConstantKp(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kplist0 = [float(v) for v in ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', '7.92389e-09', '2.62777e-09', '1.10053e-09']]
Kplist = self.reaction2.getEquilibriumConstants(Tlist, type='Kp')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kplist[i] / Kplist0[i], 1.0, 4)
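# Quick hand check of the three expected-value lists above (assuming the usual reference
# state P0 = 1e5 Pa and dn = -1 for the association acetyl + O2 -> acetylperoxy):
#   Kp = Ka * P0**dn         -> 8.75951e29 * 1e-5                 ~ 8.75951e24  (Kplist0[0])
#   Kc = Ka * (P0/(R*T))**dn -> 8.75951e29 / (1e5 / (8.314*200))  ~ 1.457e28    (Kclist0[0])
# i.e. Ka is dimensionless while Kp and Kc carry Pa**dn and (mol/m^3)**dn units respectively.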
def testStoichiometricCoefficient(self):
"""
Test the Reaction.getStoichiometricCoefficient() method.
"""
for reactant in self.reaction.reactants:
self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), -1)
for product in self.reaction.products:
self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 1)
for reactant in self.reaction2.reactants:
self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), 0)
for product in self.reaction2.products:
self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 0)
def testRateCoefficient(self):
"""
Test the Reaction.getRateCoefficient() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
self.assertAlmostEqual(self.reaction.getRateCoefficient(T, P) / self.reaction.kinetics.getRateCoefficient(T), 1.0, 6)
def testGenerateReverseRateCoefficient(self):
"""
Test the Reaction.generateReverseRateCoefficient() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
P = 1e5
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
for T in Tlist:
kr0 = self.reaction2.getRateCoefficient(T, P) / self.reaction2.getEquilibriumConstant(T)
kr = reverseKinetics.getRateCoefficient(T)
self.assertAlmostEqual(kr0 / kr, 1.0, 0)
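# The check above is detailed balance: the generated reverse kinetics should satisfy
# kr(T) = kf(T) / Keq(T) at every temperature, so kr from generateReverseRateCoefficient()
# is compared against the forward rate coefficient divided by the equilibrium constant.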
def testGenerateReverseRateCoefficientArrhenius(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the Arrhenius format.
"""
original_kinetics = Arrhenius(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
Ea = (0.0, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(original_kinetics.Tmin.value_si, original_kinetics.Tmax.value_si, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
@work_in_progress
def testGenerateReverseRateCoefficientArrheniusEP(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the ArrheniusEP format.
"""
from rmgpy.kinetics import ArrheniusEP
original_kinetics = ArrheniusEP(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
alpha = 0.5,
E0 = (41.84, 'kJ/mol'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(original_kinetics.Tmin.value_si, original_kinetics.Tmax.value_si, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientPDepArrhenius(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the PDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius
arrhenius0 = Arrhenius(
A = (1.0e6,"s^-1"),
n = 1.0,
Ea = (10.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
arrhenius1 = Arrhenius(
A = (1.0e12,"s^-1"),
n = 1.0,
Ea = (20.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
pressures = numpy.array([0.1, 10.0])
arrhenius = [arrhenius0, arrhenius1]
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
original_kinetics = PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientMultiArrhenius(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the MultiArrhenius format.
"""
from rmgpy.kinetics import MultiArrhenius
pressures = numpy.array([0.1, 10.0])
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
arrhenius = [
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
]
original_kinetics = MultiArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientMultiPDepArrhenius(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the MultiPDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius
Tmin = 350.
Tmax = 1500.
Pmin = 1e-1
Pmax = 1e1
pressures = numpy.array([1e-1,1e1])
comment = 'CH3 + C2H6 <=> CH4 + C2H5 (Baulch 2005)'
arrhenius = [
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (9.3e-16,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (1.4e-11,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
]
original_kinetics = MultiPDepArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientThirdBody(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the ThirdBody format.
"""
from rmgpy.kinetics import ThirdBody
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
thirdBody = ThirdBody(
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = thirdBody
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientLindemann(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the Lindemann format.
"""
from rmgpy.kinetics import Lindemann
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
lindemann = Lindemann(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = lindemann
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientTroe(self):
"""
Test that the Reaction.generateReverseRateCoefficient() method works for the Troe format.
"""
from rmgpy.kinetics import Troe
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
alpha = 0.783
T3 = 74
T1 = 2941
T2 = 6964
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
troe = Troe(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
alpha = alpha,
T3 = (T3,"K"),
T1 = (T1,"K"),
T2 = (T2,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = troe
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testTSTCalculation(self):
"""
A test of the transition state theory k(T) calculation function,
using the reaction H + C2H4 -> C2H5.
"""
Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.01)
klist = numpy.array([self.reaction.calculateTSTRateCoefficient(T) for T in Tlist])
arrhenius = Arrhenius().fitToData(Tlist, klist, kunits='m^3/(mol*s)')
klist2 = numpy.array([arrhenius.getRateCoefficient(T) for T in Tlist])
# Check that the correct Arrhenius parameters are returned
self.assertAlmostEqual(arrhenius.A.value_si, 2265.2488, delta=1e-2)
self.assertAlmostEqual(arrhenius.n.value_si, 1.45419, delta=1e-4)
self.assertAlmostEqual(arrhenius.Ea.value_si, 6645.24, delta=1e-2)
# Check that the fit is satisfactory (defined here as always within 5%)
for i in range(len(Tlist)):
self.assertAlmostEqual(klist[i], klist2[i], delta=5e-2 * klist[i])
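# Background (assumed conventional TST form): calculateTSTRateCoefficient is expected to
# evaluate an Eyring-type expression
#   k(T) = kappa(T) * (kB*T/h) * Q_TS(T) / (Q_H(T) * Q_C2H4(T)) * exp(-dE0 / (kB*T)),
# built from the conformer partition functions defined in setUp, with dE0 the barrier
# height E0(TS) - E0(H) - E0(C2H4); any tunnelling factor kappa would use the imaginary
# frequency (-750.232 cm^-1). The fit above then recasts k(T) in modified Arrhenius form.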
def testPickle(self):
"""
Test that a Reaction object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
reaction = cPickle.loads(cPickle.dumps(self.reaction,-1))
self.assertEqual(len(self.reaction.reactants), len(reaction.reactants))
self.assertEqual(len(self.reaction.products), len(reaction.products))
for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants):
self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2)
self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units)
for product0, product in zip(self.reaction.products, reaction.products):
self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2)
self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2)
self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2)
self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units)
self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6)
self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment)
self.assertEqual(self.reaction.duplicate, reaction.duplicate)
self.assertEqual(self.reaction.degeneracy, reaction.degeneracy)
def testOutput(self):
"""
Test that a Reaction object can be successfully reconstructed
from its repr() output with no loss of information.
"""
exec('reaction = %r' % (self.reaction))
self.assertEqual(len(self.reaction.reactants), len(reaction.reactants))
self.assertEqual(len(self.reaction.products), len(reaction.products))
for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants):
self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2)
self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units)
for product0, product in zip(self.reaction.products, reaction.products):
self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2)
self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2)
self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2)
self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units)
self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6)
self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment)
self.assertEqual(self.reaction.duplicate, reaction.duplicate)
self.assertEqual(self.reaction.degeneracy, reaction.degeneracy)
################################################################################
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 42.567452
| 208
| 0.566452
| 4,000
| 39,758
| 5.6075
| 0.14475
| 0.049844
| 0.018457
| 0.030762
| 0.702185
| 0.672893
| 0.657289
| 0.61975
| 0.600713
| 0.566295
| 0
| 0.069843
| 0.303159
| 39,758
| 933
| 209
| 42.613076
| 0.739758
| 0.082575
| 0
| 0.626591
| 0
| 0
| 0.046929
| 0
| 0
| 0
| 0
| 0
| 0.154173
| 1
| 0.048091
| false
| 0
| 0.029703
| 0.004243
| 0.087694
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ae09675e3f04c208d0aada0fe5dc7452f3a90fa
| 9,402
|
py
|
Python
|
python/video_ADG.py
|
alexberndt/mobile-AGV-optimization
|
76b97fd5aa3898fd6cb6f74f8d87140555c92af5
|
[
"MIT"
] | 2
|
2021-12-22T03:07:08.000Z
|
2022-03-19T09:41:29.000Z
|
python/video_ADG.py
|
alexberndt/mobile-AGV-optimization
|
76b97fd5aa3898fd6cb6f74f8d87140555c92af5
|
[
"MIT"
] | null | null | null |
python/video_ADG.py
|
alexberndt/mobile-AGV-optimization
|
76b97fd5aa3898fd6cb6f74f8d87140555c92af5
|
[
"MIT"
] | 1
|
2021-11-22T10:58:38.000Z
|
2021-11-22T10:58:38.000Z
|
"""
Closed-loop MILP solved to determine the optimal ordering defined by the ADG (Action Dependency Graph).
"""
import sys
import yaml
import time
import matplotlib.colors as mcolors
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter  # video writer used below; assumed not re-exported by the star imports
import random
import logging
import networkx as nx
import csv
import statistics as stat
import os
import numpy as np  # np.NaN / np.arange / np.random are used below but numpy was never imported explicitly here
from mip import Model, ProgressLog, xsum, maximize, minimize, BINARY, CONTINUOUS, Constr, ConstrList
sys.path.insert(1, "functions/")
from planners import *
from visualizers import *
from milp_formulation import *
from robot import *
from adg import *
from adg_node import *
from process_results import *
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(name)s - %(levelname)s :: %(message)s', level=logging.INFO)
def main():
""" --------------------------- INPUTS --------------------------------- """
show_visual = False
show_ADG = True #not show_visual
run_MILP = True #False #True
save_file = False
sim_timeout = 500
# define prediction and control horizons: H_prediction >= H_control
H_prediction = np.NaN # integer value for forward node lookup
H_control = 5
random_seed = 0
mu = 0.5
robust_param = 0.0
delay_amount = 5
delayed_robot_cnt = 2
w = 1.4 # sub-optimality bound: w = 1.0 -> CBS, else ECBS!
fldr = "nuernberg_small" # auto_gen_01_nuernberg | auto_gen_00_large | auto_gen_02_simple | manual_03_maxplus
random.seed(random_seed)
np.random.seed(random_seed)
""" -------------------------------------------------------------------- """
# initial setup: resolve data paths and run the initial (E)CBS plan
pwd = os.path.dirname(os.path.abspath(__file__))
logger.info(pwd)
map_file = pwd + "/data/" + fldr + "/csv_map_yaml.yaml"
robot_file = pwd + "/data/" + fldr + "/csv_robots_yaml.yaml"
robot_file_tmp = pwd + "/data/tmp/robots.yaml"
start_time = time.time()
plans = run_CBS(map_file, robot_file, w=w) # if w > 1.0, run_CBS uses ECBS!
logger.info(" with sub-optimality w={}".format(w))
logger.info(" plan statistics: {} \n".format(plans["statistics"]))
logger.debug(plans["schedule"])
# show factory map
# show_factory_map(map_file, robot_file, True)
# plt.show()
map_gen_robot_count = 10
map_gen_seedval = "NaN"
try:
map_gen_robot_count = int(sys.argv[1])
map_gen_seedval = int(sys.argv[2])
H_control = int(sys.argv[3])
robust_param = int(sys.argv[4])
random.seed(map_gen_seedval) # map_gen_seedval
np.random.seed(map_gen_seedval) # map_gen_seedval
except:
print(" no valid inputs given, ignoring ...")
# determine ADG, reverse ADG and dependency groups
ADG, robot_plan, goal_positions = determine_ADG(plans, show_graph=False)
nodes_all, edges_type_1, dependency_groups = analyze_ADG(ADG, plans, show_graph=False)
ADG_reverse = ADG.reverse(copy=False)
# initialize simulation
robots = []
solve_time = []
robots_done = []
time_to_goal = {}
colors = plt.cm.rainbow( np.arange(len(robot_plan))/len(robot_plan) )
for robot_id in robot_plan:
plan = robot_plan[robot_id]
logger.debug("Robot {} - plan: {} \t \t positions: {}".format(robot_id, plan["nodes"], plan["positions"]))
new_robot = Robot(robot_id, plan, colors[robot_id], goal_positions[robot_id])
robots.append(new_robot)
robots_done.append(False)
time_to_goal[robot_id] = 0
if show_visual:
visualizer = Visualizer(map_file, robots)
# initialize optimization MIP object m_opt
m_opt = Model('MILP_sequence', solver='CBC')
# print(m_opt.max_nodes)
pl_opt = ProgressLog()
# pl_opt.settings = "objective_value"
# print("pl_opt.settings: {}".format(pl_opt.settings))
# print("pl_opt.log: {}".format(pl_opt.log))
# pl_opt.instance = m_opt.name
# print("pl_opt.instance: {}".format(pl_opt.instance))
ADG_fig = plt.figure(figsize=(12,8))
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
metadata = dict(title='Movie Test', artist='Matplotlib',
comment='Movie support!')
writer = FFMpegWriter(fps=2, metadata=metadata)
with writer.saving(ADG_fig, "ADG_video.mp4", 500):
# run a simulation in time
k = 0
robot_IDs_to_delay = []
while (not all(robots_done)) and (k < sim_timeout):
print("pl_opt.log: {}".format(pl_opt.log))
m_opt.clear()
# show current robot status
logger.info("-------------------- @ time step k = {} --------------------".format(k))
for robot in robots:
node_info = ADG.node[robot.current_node]["data"]
logger.debug(" - Robot {} # {} @ {} => status: {}".format(robot.robot_ID, node_info.ID, node_info.s_loc, robot.status))
# solve MILP for the advanced ADG to potentially adjust ordering
res, solve_t = solve_MILP(robots, dependency_groups, ADG, ADG_reverse, H_control, H_prediction, m_opt, pl_opt, run=run_MILP, uncertainty_bound=robust_param)
solve_time.append(solve_t)
if not (res is None or res == "OptimizationStatus.OPTIMAL"):
raise ValueError("Optimization NOT optimal")
# ADG after MILP
if show_ADG:
#
draw_ADG(ADG, robots, "ADG after MILP ADG | k = {}".format(k), writer=writer)
# plt.show()
# check for cycles
try:
nx.find_cycle(ADG, orientation="original")
logger.warning("Cycle detected!!")
raise Exception("ADG has a cycle => deadlock! something is wrong with optimization")
except nx.NetworkXNoCycle:
logger.debug("no cycle detected in ADG => no deadlock. good!")
pass
if (k % delay_amount) == 0:
robot_IDs = np.arange(map_gen_robot_count)
robot_IDs_to_delay = np.random.choice(map_gen_robot_count, size=delayed_robot_cnt, replace=False)
logger.info("delaying robots (ID): {}".format(robot_IDs_to_delay))
# Advance robots if possible (dependencies have been met)
for robot in robots:
# check if all dependencies have been met, to advance to next node
node_info = ADG.node[robot.current_node]["data"]
node_dependencies_list = list(ADG_reverse.neighbors(robot.current_node))
all_dependencies_completed = True
for dependency in node_dependencies_list:
if (ADG.node[dependency]["data"].status != Status.FINISHED):
all_dependencies_completed = False
# if all dependencies are completed, the robot can advance!
# delay_amount = np.random.poisson(mu) # same sample every time
if all_dependencies_completed and k > 0: # (robot.robot_ID == 2 or k > 5)
if (not (robot.robot_ID in robot_IDs_to_delay)): # or (k < 10 or k > 20)): # or (robot.robot_ID == 3 or k > 8):
ADG.node[robot.current_node]["data"].status = Status.FINISHED
robot.advance()
if not robot.is_done():
time_to_goal[robot.robot_ID] += 1
else:
robots_done[robot.robot_ID] = True
if show_visual:
visualizer.redraw(robots, pause_length=0.1)
# return 0
k += 1
# end of while loop
total_time = 0
for idx, t in time_to_goal.items():
total_time += t
logger.info("Total time to complete missions: {}".format(total_time))
logger.info("horizon = {}".format(H_control))
logger.info("")
logger.info("Computation time:")
logger.info(" - max: {}".format(max(solve_time)))
logger.info(" - avg: {}".format(stat.mean(solve_time)))
# create data to save to YAML file
simulation_results = {}
simulation_results["parameters"] = {}
simulation_results["parameters"]["H_control"] = H_control
simulation_results["parameters"]["random seed"] = random_seed
simulation_results["parameters"]["ECBS w"] = w
simulation_results["parameters"]["mu"] = mu
simulation_results["parameters"]["robust param"] = robust_param
simulation_results["parameters"]["delay amount"] = delay_amount
simulation_results["map details"] = {}
simulation_results["map details"]["robot_count"] = map_gen_robot_count
simulation_results["map details"]["seed val"] = map_gen_seedval
simulation_results["results"] = {}
simulation_results["results"]["comp time"] = {}
simulation_results["results"]["comp time"]["solve_time"] = [solve_time]
simulation_results["results"]["comp time"]["max"] = max(solve_time)
simulation_results["results"]["comp time"]["avg"] = stat.mean(solve_time)
simulation_results["results"]["total time"] = total_time
logger.info(simulation_results)
file_name = pwd + "/results/robust_" +str(delayed_robot_cnt) + "x" + str(delay_amount) + "/res_robots_" + str(map_gen_robot_count) + "_horizon_" + str(H_control) + "_mapseed_" + str(map_gen_seedval) + "_robustparam_" + str(robust_param) + ".yaml"
if save_file:
save_to_yaml(simulation_results, file_name)
if __name__ == "__main__":
main()
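# Example invocation (inferred from the sys.argv parsing in main(); invalid or missing
# arguments fall back to the in-file defaults):
#   python video_ADG.py <robot_count> <map_seed> <H_control> <robust_param>
# The run writes ADG_video.mp4 and, when save_file is True, a YAML summary under results/.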
| 40.008511
| 251
| 0.623591
| 1,199
| 9,402
| 4.654712
| 0.251043
| 0.057875
| 0.018635
| 0.017201
| 0.086364
| 0.0611
| 0.049812
| 0.035119
| 0
| 0
| 0
| 0.009113
| 0.241332
| 9,402
| 234
| 252
| 40.179487
| 0.773307
| 0.153903
| 0
| 0.073171
| 0
| 0
| 0.147679
| 0.008695
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006098
| false
| 0.006098
| 0.134146
| 0
| 0.140244
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ae26b063b0fbd07c2ce06161f218674d84af1d4
| 1,119
|
py
|
Python
|
ice/consoles.py
|
reavessm/Ice
|
e78d046abfd6006b1a81d1cbdb516b7c3e141ac9
|
[
"MIT"
] | 578
|
2015-01-02T12:43:52.000Z
|
2022-03-27T23:45:32.000Z
|
ice/consoles.py
|
reavessm/Ice
|
e78d046abfd6006b1a81d1cbdb516b7c3e141ac9
|
[
"MIT"
] | 271
|
2015-01-05T01:56:38.000Z
|
2021-08-14T02:51:24.000Z
|
ice/consoles.py
|
reavessm/Ice
|
e78d046abfd6006b1a81d1cbdb516b7c3e141ac9
|
[
"MIT"
] | 156
|
2015-01-07T15:43:20.000Z
|
2021-12-11T19:10:44.000Z
|
# encoding: utf-8
import os
import roms
def console_roms_directory(configuration, console):
"""
If the user has specified a custom ROMs directory in consoles.txt then
return that.
Otherwise, append the shortname of the console to the default ROMs
directory given by config.txt.
"""
if console.custom_roms_directory:
return console.custom_roms_directory
return os.path.join(roms.roms_directory(configuration), console.shortname)
def path_is_rom(console, path):
"""
This function determines if a given path is actually a valid ROM file.
If a list of extensions is supplied for this console, we check if the path has a valid extension
If no extensions are defined for this console, we just accept any file
"""
if console.extensions == "":
return True
# Normalize the extension based on the things we validly ignore.
# Aka capitalization, whitespace, and leading dots
normalize = lambda ext: ext.lower().strip().lstrip('.')
(name, ext) = os.path.splitext(path)
valid_extensions = console.extensions.split(',')
return normalize(ext) in map(normalize, valid_extensions)
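# Illustrative behaviour (not part of the module): for a console whose extensions field is
# "nes, .SMC", path_is_rom(console, "roms/Mario.NES") returns True because both sides
# normalize to "nes"/"smc", while a console with extensions == "" accepts any file.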
| 31.971429
| 98
| 0.747096
| 162
| 1,119
| 5.092593
| 0.475309
| 0.094545
| 0.069091
| 0.08
| 0.077576
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001081
| 0.173369
| 1,119
| 34
| 99
| 32.911765
| 0.890811
| 0.49151
| 0
| 0
| 0
| 0
| 0.003745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ae285af81cb46f32301f55fbf5e2dcaee2e26e6
| 5,527
|
py
|
Python
|
clue/c3.py
|
dumpmemory/roformer-v2
|
95b71ae03b8bb910998285e194d7752b1e4104c0
|
[
"Apache-2.0"
] | 44
|
2022-03-17T02:58:27.000Z
|
2022-03-31T13:08:29.000Z
|
clue/c3.py
|
dumpmemory/roformer-v2
|
95b71ae03b8bb910998285e194d7752b1e4104c0
|
[
"Apache-2.0"
] | null | null | null |
clue/c3.py
|
dumpmemory/roformer-v2
|
95b71ae03b8bb910998285e194d7752b1e4104c0
|
[
"Apache-2.0"
] | 2
|
2022-03-17T05:47:06.000Z
|
2022-03-22T10:33:54.000Z
|
#! -*- coding:utf-8 -*-
# CLUE benchmark evaluation
# C3: multiple-choice reading comprehension
# Approach: concatenate each choice with the question and passage, score each pair, and rank the scores
import json
import numpy as np
from snippets import *
from bert4keras.backend import keras
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.snippets import open
from bert4keras.snippets import truncate_sequences
from tqdm import tqdm
# Basic hyperparameters
num_classes = 4
maxlen = 512
batch_size = 4
epochs = 10
def load_data(filename):
"""加载数据
格式:[(篇章, 问题, 选项, 答案id)]
"""
D = []
with open(filename) as f:
data = json.load(f)
for d in data:
p = u'||'.join(d[0])
for qa in d[1]:
q = qa['question']
while len(qa['choice']) < num_classes:
qa['choice'].append(u'无效答案')  # pad with a dummy "invalid answer" choice
c = qa['choice'][:num_classes]
if 'answer' in qa:
a = qa['choice'].index(qa['answer'])
else:
a = 0
D.append((p, q, c, a))
return D
# Load the datasets
train_data = load_data(data_path + 'c3/m-train.json')
train_data += load_data(data_path + 'c3/d-train.json')
valid_data = load_data(data_path + 'c3/m-dev.json')
valid_data += load_data(data_path + 'c3/d-dev.json')
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for is_end, (p, q, cs, a) in self.sample(random):
for c in cs:
p_ids = tokenizer.encode(p)[0]
q_ids = tokenizer.encode(q)[0][1:]
c_ids = tokenizer.encode(c)[0][1:]
truncate_sequences(maxlen, -2, c_ids, q_ids, p_ids)
token_ids = p_ids + q_ids + c_ids
batch_token_ids.append(token_ids)
batch_segment_ids.append([0] * len(token_ids))
batch_labels.append([a])
if len(batch_token_ids) == self.batch_size * num_classes or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
batch_labels = sequence_padding(batch_labels)
yield [batch_token_ids, batch_segment_ids], batch_labels
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
# Wrap the datasets in generators
train_generator = data_generator(train_data, batch_size)
valid_generator = data_generator(valid_data, batch_size)
def multichoice_crossentropy(y_true, y_pred):
"""多项选择的交叉熵
"""
y_true = K.cast(y_true, 'int32')[::num_classes]
y_pred = K.reshape(y_pred, (-1, num_classes))
return K.mean(
K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
)
def multichoice_accuracy(y_true, y_pred):
"""多项选择的准确率
"""
y_true = K.cast(y_true, 'int32')[::num_classes, 0]
y_pred = K.reshape(y_pred, (-1, num_classes))
y_pred = K.cast(K.argmax(y_pred, axis=1), 'int32')
return K.mean(K.cast(K.equal(y_true, y_pred), K.floatx()))
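# Shape bookkeeping for the two metrics above: each question is expanded into num_classes
# consecutive rows (one per choice), and the model emits a single logit per row. Both
# functions therefore reshape y_pred to (-1, num_classes), keep one label per group via
# y_true[::num_classes], and score the question by a softmax cross-entropy / argmax over
# the four choice logits.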
# Build the model
output = base.model.output
output = keras.layers.Lambda(lambda x: x[:, 0])(output)
output = keras.layers.Dense(units=1,
kernel_initializer=base.initializer)(output)
model = keras.models.Model(base.model.input, output)
model.summary()
model.compile(
loss=multichoice_crossentropy,
optimizer=optimizer4,
metrics=[multichoice_accuracy]
)
class Evaluator(keras.callbacks.Callback):
"""保存验证集acc最好的模型
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, epoch, logs=None):
val_acc = self.evaluate(valid_generator)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
model.save_weights('weights/c3.weights')
print(
u'val_acc: %.5f, best_val_acc: %.5f\n' %
(val_acc, self.best_val_acc)
)
def evaluate(self, data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).reshape((-1, num_classes))
y_pred = y_pred.argmax(axis=1)
y_true = y_true[::num_classes, 0]
total += len(y_true)
right += (y_true == y_pred).sum()
return right / total
def test_predict(in_file, out_file):
"""输出测试结果到文件
结果文件可以提交到 https://www.cluebenchmarks.com 评测。
"""
test_data = load_data(in_file)
test_generator = data_generator(test_data, batch_size)
results = []
for x_true, _ in tqdm(test_generator, ncols=0):
y_pred = model.predict(x_true).reshape((-1, num_classes))
y_pred = y_pred.argmax(axis=1)
results.extend(y_pred)
fw = open(out_file, 'w')
with open(in_file) as fr:
data = json.load(fr)
i = 0
for d in data:
for qa in d[1]:
l = json.dumps({'id': str(qa['id']), 'label': str(results[i])})
fw.write(l + '\n')
i += 1
fw.close()
if __name__ == '__main__':
evaluator = Evaluator()
model.fit_generator(
train_generator.forfit(),
steps_per_epoch=len(train_generator),
epochs=epochs,
callbacks=[evaluator]
)
model.load_weights('weights/c3.weights')
test_predict(
in_file=data_path + 'c3/test1.0.json',
out_file='results/c310_predict.json'
)
test_predict(
in_file=data_path + 'c3/test1.1.json',
out_file='results/c311_predict.json'
)
else:
model.load_weights('weights/c3.weights')
| 29.089474
| 79
| 0.599602
| 749
| 5,527
| 4.162884
| 0.256342
| 0.028865
| 0.029185
| 0.032072
| 0.257858
| 0.220334
| 0.173509
| 0.154907
| 0.076972
| 0.039128
| 0
| 0.01716
| 0.272481
| 5,527
| 189
| 80
| 29.243386
| 0.758269
| 0.041071
| 0
| 0.134328
| 0
| 0
| 0.059115
| 0.009535
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0
| 0.059701
| 0
| 0.164179
| 0.007463
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ae4d12e5b6c5a2ce81f0095493d76c6afcfb99b
| 3,481
|
py
|
Python
|
logpy/util.py
|
mrocklin/logpy
|
7e32f4da10a0ab5b86fb23947cfce9a4d49c6b3f
|
[
"BSD-3-Clause"
] | 1
|
2016-09-20T16:05:12.000Z
|
2016-09-20T16:05:12.000Z
|
logpy/util.py
|
mrocklin/logpy
|
7e32f4da10a0ab5b86fb23947cfce9a4d49c6b3f
|
[
"BSD-3-Clause"
] | null | null | null |
logpy/util.py
|
mrocklin/logpy
|
7e32f4da10a0ab5b86fb23947cfce9a4d49c6b3f
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools as it
from toolz.compatibility import range, map, iteritems
def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
""" Transitive dict.get
>>> from logpy.util import transitive_get
>>> d = {1: 2, 2: 3, 3: 4}
>>> d.get(1)
2
>>> transitive_get(1, d)
4
"""
while hashable(key) and key in d:
key = d[key]
return key
def deep_transitive_get(key, d):
""" Transitive get that propagates within tuples
>>> from logpy.util import transitive_get, deep_transitive_get
>>> d = {1: (2, 3), 2: 12, 3: 13}
>>> transitive_get(1, d)
(2, 3)
>>> deep_transitive_get(1, d)
(12, 13)
"""
key = transitive_get(key, d)
if isinstance(key, tuple):
return tuple(map(lambda k: deep_transitive_get(k, d), key))
else:
return key
def dicthash(d):
return hash(frozenset(d.items()))
def multihash(x):
try:
return hash(x)
except TypeError:
if isinstance(x, (list, tuple, set, frozenset)):
return hash(tuple(map(multihash, x)))
if type(x) is dict:
return hash(frozenset(map(multihash, x.items())))
if type(x) is slice:
return hash((x.start, x.stop, x.step))
raise TypeError('Hashing not covered for ' + str(x))
def unique(seq, key=lambda x: x):
seen = set()
for item in seq:
try:
if key(item) not in seen:
seen.add(key(item))
yield item
except TypeError: # item probably isn't hashable
yield item # Just return it and hope for the best
def interleave(seqs, pass_exceptions=()):
iters = map(iter, seqs)
while iters:
newiters = []
for itr in iters:
try:
yield next(itr)
newiters.append(itr)
except (StopIteration,) + tuple(pass_exceptions):
pass
iters = newiters
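# Example (illustrative): interleave([[1, 2, 3], [10, 20]]) yields 1, 10, 2, 20, 3 --
# sources are drained round-robin and exhausted iterators are silently dropped.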
def take(n, seq):
if n is None:
return seq
if n == 0:
return tuple(seq)
return tuple(it.islice(seq, 0, n))
def evalt(t):
""" Evaluate tuple if unevaluated
>>> from logpy.util import evalt
>>> add = lambda x, y: x + y
>>> evalt((add, 2, 3))
5
>>> evalt(add(2, 3))
5
"""
if isinstance(t, tuple) and len(t) >= 1 and callable(t[0]):
return t[0](*t[1:])
else:
return t
def intersection(*seqs):
return (item for item in seqs[0]
if all(item in seq for seq in seqs[1:]))
def groupsizes(total, len):
""" Groups of length len that add up to total
>>> from logpy.util import groupsizes
>>> tuple(groupsizes(4, 2))
((1, 3), (2, 2), (3, 1))
"""
if len == 1:
yield (total,)
else:
for i in range(1, total - len + 1 + 1):
for perm in groupsizes(total - i, len - 1):
yield (i,) + perm
def raises(err, lamda):
try:
lamda()
raise Exception("Did not raise %s"%err)
except err:
pass
def pprint(g):
""" Pretty print a tree of goals """
if callable(g) and hasattr(g, '__name__'):
return g.__name__
if isinstance(g, type):
return g.__name__
if isinstance(g, tuple):
return "(" + ', '.join(map(pprint, g)) + ")"
return str(g)
def index(tup, ind):
""" Fancy indexing with tuples """
return tuple(tup[i] for i in ind)
| 24.687943
| 67
| 0.545246
| 482
| 3,481
| 3.879668
| 0.278008
| 0.076471
| 0.027807
| 0.040642
| 0.110695
| 0.059893
| 0
| 0
| 0
| 0
| 0
| 0.02292
| 0.323183
| 3,481
| 140
| 68
| 24.864286
| 0.770798
| 0.214019
| 0
| 0.215909
| 0
| 0
| 0.020046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159091
| false
| 0.045455
| 0.022727
| 0.022727
| 0.420455
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ae561e06496768e94110f91362d5a5eeb524bdb
| 545
|
py
|
Python
|
index.py
|
rinocloud/rinobot-plugin-shift
|
4f7f16a5e610b91b64377733d24b6ab4b63daa67
|
[
"MIT"
] | null | null | null |
index.py
|
rinocloud/rinobot-plugin-shift
|
4f7f16a5e610b91b64377733d24b6ab4b63daa67
|
[
"MIT"
] | null | null | null |
index.py
|
rinocloud/rinobot-plugin-shift
|
4f7f16a5e610b91b64377733d24b6ab4b63daa67
|
[
"MIT"
] | null | null | null |
import rinobot_plugin as bot
import numpy as np
def main():
# lets get our parameters and data
filepath = bot.filepath()
data = bot.loadfile(filepath)
# now comes the custom plugin logic
shift = bot.get_arg('shift', type=float, required=True)
index = bot.index_from_args(data)
data[index] = data[index] + shift
outname = bot.no_extension() + '-shift-%s.txt' % shift
# then we set up the output
outpath = bot.output_filepath(outname)
np.savetxt(outpath, data)
if __name__ == "__main__":
main()
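# Typical use (assumed rinobot plugin conventions): the bot supplies the input file path and
# a required "shift" argument; the plugin adds `shift` to the column chosen by
# bot.index_from_args(data) and writes "<original name>-shift-<shift>.txt" via bot.output_filepath.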
| 25.952381
| 59
| 0.669725
| 77
| 545
| 4.558442
| 0.584416
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218349
| 545
| 20
| 60
| 27.25
| 0.823944
| 0.168807
| 0
| 0
| 0
| 0
| 0.057906
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|