code | docstring
---|---
def _buildItem(self, elem, cls=None, initpath=None):
initpath = initpath or self._initpath
if cls is not None:
return cls(self._server, elem, initpath)
etype = elem.attrib.get('type', elem.attrib.get('streamType'))
ehash = '%s.%s' % (elem.tag, etype) if etype else elem.tag
ecls = utils.PLEXOBJECTS.get(ehash, utils.PLEXOBJECTS.get(elem.tag))
if ecls is not None:
return ecls(self._server, elem, initpath)
raise UnknownType("Unknown library type <%s type='%s'../>" % (elem.tag, etype))
|
Factory function to build objects based on registered PLEXOBJECTS.
|
def connectMSExchange(server):
if not sspi:
return False, 'No sspi module found.'
code, response = server.ehlo()
if code != SMTP_EHLO_OKAY:
return False, 'Server did not respond to EHLO command.'
sspi_client = sspi.ClientAuth('NTLM')
sec_buffer = None
err, sec_buffer = sspi_client.authorize(sec_buffer)
buffer = sec_buffer[0].Buffer
ntlm_message = base64.encodestring(buffer).replace('\n', '')
code, response = server.docmd('AUTH', 'NTLM ' + ntlm_message)
if code != SMTP_AUTH_CHALLENGE:
msg = 'Server did not respond as expected to NTLM negotiate message'
return False, msg
err, sec_buffer = sspi_client.authorize(base64.decodestring(response))
buffer = sec_buffer[0].Buffer
ntlm_message = base64.encodestring(buffer).replace('\n', '')
code, response = server.docmd('', ntlm_message)
if code != SMTP_AUTH_OKAY:
return False, response
return True, ''
|
Creates a connection from the given SMTP server to a Microsoft Exchange server.
:param server | <smtplib.SMTP>
:usage |>>> import smtplib
|>>> import projex.notify
|>>> smtp = smtplib.SMTP('mail.server.com')
|>>> projex.notify.connectMSExchange(smtp)
:return (<bool> success, <str> reason)
|
def load_from_cache(path=user_path):
if not path:
return
try:
with open(path, 'rb') as f:
dversion, mversion, data = pickle.load(f)
if dversion == data_version and mversion == module_version:
return data
except (FileNotFoundError, ValueError, EOFError):
pass
|
Try to load category ranges from userlevel cache file.
:param path: path to userlevel cache file
:type path: str
:returns: category ranges dict or None
:rtype: None or dict of RangeGroup
|
def convex_conj(self):
conj_exp = conj_exponent(self.pointwise_norm.exponent)
return IndicatorGroupL1UnitBall(self.domain, exponent=conj_exp)
|
The convex conjugate functional of the group L1-norm.
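A brief note on the identity this relies on (standard convex analysis, not specific to this codebase): the convex conjugate of a norm is the indicator of the dual-norm unit ball,
    (\|.\|_p)^* = indicator of { x : \|x\|_q <= 1 },  with  1/p + 1/q = 1,
which is why a conjugate exponent is computed and an indicator-of-the-unit-ball functional is returned.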
|
def extract(self, item):
article_candidates = []
for extractor in self.extractor_list:
article_candidates.append(extractor.extract(item))
article_candidates = self.cleaner.clean(article_candidates)
article = self.comparer.compare(item, article_candidates)
item['article_title'] = article.title
item['article_description'] = article.description
item['article_text'] = article.text
item['article_image'] = article.topimage
item['article_author'] = article.author
item['article_publish_date'] = article.publish_date
item['article_language'] = article.language
return item
|
Runs the HTML response through a list of initialized extractors and a cleaner, then compares the results.
:param item: NewscrawlerItem to be processed.
:return: An updated NewscrawlerItem including the results of the extraction
|
def _get_ca_certs_paths():
ca_certs = []
embedded_root = os.path.dirname(os.path.abspath(__file__))
for _ in range(10):
if os.path.basename(embedded_root) == 'embedded':
ca_certs.append(os.path.join(embedded_root, 'ssl', 'certs', 'cacert.pem'))
break
embedded_root = os.path.dirname(embedded_root)
else:
raise OSError(
'Unable to locate `embedded` directory. Please specify ca_certs in your http yaml configuration file.'
)
try:
import tornado
except ImportError:
pass
else:
ca_certs.append(os.path.join(os.path.dirname(tornado.__file__), 'ca-certificates.crt'))
ca_certs.append('/etc/ssl/certs/ca-certificates.crt')
return ca_certs
|
Get a list of possible paths containing certificates.
The check is installed via pip to:
* Windows: embedded/lib/site-packages/datadog_checks/http_check
* Linux: embedded/lib/python2.7/site-packages/datadog_checks/http_check
The certificate is installed to:
* embedded/ssl/certs/cacert.pem
Walk up to `embedded`, then back down to ssl/certs to find the certificate file.
|
def trigger_all_callbacks(self, callbacks=None):
return [ret
for key in self
for ret in self.trigger_callbacks(key, callbacks=callbacks)]
|
Trigger callbacks for all keys on all or a subset of subscribers.
:param Iterable callbacks: iterable of callbacks, or None for all subscribed callbacks
:rtype: Iterable[tornado.concurrent.Future]
|
def _parse_log_entry(entry_pb):
try:
return MessageToDict(entry_pb)
except TypeError:
if entry_pb.HasField("proto_payload"):
proto_payload = entry_pb.proto_payload
entry_pb.ClearField("proto_payload")
entry_mapping = MessageToDict(entry_pb)
entry_mapping["protoPayload"] = proto_payload
return entry_mapping
else:
raise
|
Special helper to parse ``LogEntry`` protobuf into a dictionary.
The ``proto_payload`` field in ``LogEntry`` is of type ``Any``. This
can be problematic if the type URL in the payload isn't in the
``google.protobuf`` registry. To help with parsing unregistered types,
this function will remove ``proto_payload`` before parsing.
:type entry_pb: :class:`.log_entry_pb2.LogEntry`
:param entry_pb: Log entry protobuf.
:rtype: dict
:returns: The parsed log entry. The ``protoPayload`` key may contain
the raw ``Any`` protobuf from ``entry_pb.proto_payload`` if
it could not be parsed.
|
def ssl_context(self, verify=True, cert_reqs=None,
check_hostname=False, certfile=None, keyfile=None,
cafile=None, capath=None, cadata=None, **kw):
assert ssl, 'SSL not supported'
cafile = cafile or DEFAULT_CA_BUNDLE_PATH
if verify is True:
cert_reqs = ssl.CERT_REQUIRED
check_hostname = True
if isinstance(verify, str):
cert_reqs = ssl.CERT_REQUIRED
if os.path.isfile(verify):
cafile = verify
elif os.path.isdir(verify):
capath = verify
return ssl._create_unverified_context(cert_reqs=cert_reqs,
check_hostname=check_hostname,
certfile=certfile,
keyfile=keyfile,
cafile=cafile,
capath=capath,
cadata=cadata)
|
Create an SSL context object.
This method should not be called from user code
|
def empty_wav(wav_path: Union[Path, str]) -> bool:
with wave.open(str(wav_path), 'rb') as wav_f:
return wav_f.getnframes() == 0
|
Check whether a wav file is empty (contains no audio frames).
|
def set_general_setting(key, value, qsettings=None):
if not qsettings:
qsettings = QSettings()
qsettings.setValue(key, deep_convert_dict(value))
|
Set value to QSettings based on key.
:param key: Unique key for setting.
:type key: basestring
:param value: Value to be saved.
:type value: QVariant
:param qsettings: A custom QSettings to use. If it's not defined, it will
use the default one.
:type qsettings: qgis.PyQt.QtCore.QSettings
|
def range_to_numeric(ranges):
values, units = zip(*(r.split() for r in ranges))
unit = os.path.commonprefix([u[::-1] for u in units])
prefixes = (u[:-len(unit)] for u in units)
values = [float(v) * SI_PREFIX[p] for v, p in zip(values, prefixes)]
return values
|
Converts a sequence of string ranges to a sequence of floats.
E.g.::
>>> range_to_numeric(['1 uV', '2 mV', '1 V'])
[1E-6, 0.002, 1.0]
|
def photparse(tab):
if 'source_id' not in tab[0].keys():
raise KeyError('phot=TRUE requires the source_id column to be included')
uniqueid = []
for i in range(len(tab)):
tmpid = tab[i]['source_id']
if tmpid not in uniqueid:
uniqueid.append(tmpid)
newtab = []
for sourceid in uniqueid:
tmpdict = photaddline(tab, sourceid)
newtab.append(tmpdict)
return newtab
|
Parse through a photometry table to group by source_id
Parameters
----------
tab: list
SQL query dictionary list from running query_dict.execute()
Returns
-------
newtab: list
Dictionary list after parsing to group together sources
|
def get_unique_scan_parameter_combinations(meta_data_array, scan_parameters=None, scan_parameter_columns_only=False):
try:
last_not_parameter_column = meta_data_array.dtype.names.index('error_code')
except ValueError:
last_not_parameter_column = meta_data_array.dtype.names.index('error')
if last_not_parameter_column == len(meta_data_array.dtype.names) - 1:
return
if scan_parameters is None:
return unique_row(meta_data_array, use_columns=range(4, len(meta_data_array.dtype.names)), selected_columns_only=scan_parameter_columns_only)
else:
use_columns = []
for scan_parameter in scan_parameters:
try:
use_columns.append(meta_data_array.dtype.names.index(scan_parameter))
except ValueError:
logging.error('No scan parameter ' + scan_parameter + ' found')
raise RuntimeError('Scan parameter not found')
return unique_row(meta_data_array, use_columns=use_columns, selected_columns_only=scan_parameter_columns_only)
|
Takes the numpy meta data array and returns the first rows with unique combinations of different scan parameter values for selected scan parameters.
If selected columns only is true, the returned histogram only contains the selected columns.
Parameters
----------
meta_data_array : numpy.ndarray
scan_parameters : list of string, None
Scan parameter names to use. If None, all are used.
selected_columns_only : bool
Returns
-------
numpy.Histogram
|
def command(command):
with cd(env.remote_path):
sudo(env.python + ' manage.py %s' % command, user=env.remote_user)
|
Run custom Django management command
|
def are_budget_data_package_fields_filled_in(self, resource):
fields = ['country', 'currency', 'year', 'status']
return all([self.in_resource(f, resource) for f in fields])
|
Check whether the budget data package fields are all filled in; if they
are not, this can't be a budget data package.
|
def load_data(self, filename, ext):
from spyder_kernels.utils.iofuncs import iofunctions
from spyder_kernels.utils.misc import fix_reference_name
glbs = self._mglobals()
load_func = iofunctions.load_funcs[ext]
data, error_message = load_func(filename)
if error_message:
return error_message
for key in list(data.keys()):
new_key = fix_reference_name(key, blacklist=list(glbs.keys()))
if new_key != key:
data[new_key] = data.pop(key)
try:
glbs.update(data)
except Exception as error:
return str(error)
return None
|
Load data from filename
|
def src_paths(self):
return {src for src, summary in self._diff_violations().items()
if len(summary.measured_lines) > 0}
|
Return the set of source files in the diff
for which we have coverage information.
|
def explain(self, expr, params=None):
if isinstance(expr, ir.Expr):
context = self.dialect.make_context(params=params)
query_ast = self._build_ast(expr, context)
if len(query_ast.queries) > 1:
raise Exception('Multi-query expression')
query = query_ast.queries[0].compile()
else:
query = expr
statement = 'EXPLAIN {0}'.format(query)
with self._execute(statement, results=True) as cur:
result = self._get_list(cur)
return 'Query:\n{0}\n\n{1}'.format(
util.indent(query, 2), '\n'.join(result)
)
|
Query for and return the query plan associated with the indicated
expression or SQL query.
Returns
-------
plan : string
|
def DEFINE_boolean(
name, default, help, flag_values=_flagvalues.FLAGS, module_name=None,
**args):
DEFINE_flag(_flag.BooleanFlag(name, default, help, **args),
flag_values, module_name)
|
Registers a boolean flag.
Such a boolean flag does not take an argument. If a user wants to
specify a false value explicitly, the long option beginning with 'no'
must be used: i.e. --noflag
This flag will have a value of None, True or False. None is possible
if default=None and the user does not specify the flag on the command
line.
Args:
name: str, the flag name.
default: bool|str|None, the default value of the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will
be registered. This should almost never need to be overridden.
module_name: str, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: dict, the extra keyword args that are passed to Flag __init__.
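A hedged usage sketch (the flag name is illustrative and assumes the default FLAGS registry; behaviour mirrors the description above):
DEFINE_boolean('verbose', False, 'Produce verbose output.')
# --verbose      -> FLAGS.verbose is True
# --noverbose    -> FLAGS.verbose is False
# flag omitted   -> FLAGS.verbose is False (the default)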
|
def upload_file(self, abspath, cloud_filename):
if not self.test_run:
content = open(abspath, "rb")
content_type = get_content_type(cloud_filename, content)
headers = get_headers(cloud_filename, content_type)
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
size = content.size
else:
size = os.stat(abspath).st_size
self.container.create(
obj_name=cloud_filename,
data=content,
content_type=content_type,
content_length=size,
content_encoding=headers.get("Content-Encoding", None),
headers=headers,
ttl=CUMULUS["FILE_TTL"],
etag=None,
)
self.upload_count += 1
if not self.quiet or self.verbosity > 1:
print("Uploaded: {0}".format(cloud_filename))
|
Uploads a file to the container.
|
def GetIndex(self):
if self.index is None:
return None
if self.index:
res, ui = chmlib.chm_resolve_object(self.file, self.index)
if (res != chmlib.CHM_RESOLVE_SUCCESS):
return None
size, text = chmlib.chm_retrieve_object(self.file, ui, 0, ui.length)
if (size == 0):
sys.stderr.write('GetIndex: file size = 0\n')
return None
return text
|
Reads and returns the index tree.
This auxiliary function reads and returns the index tree file
contents for the CHM archive.
|
def info(ctx):
controller = ctx.obj['controller']
version = controller.version
click.echo(
'OATH version: {}.{}.{}'.format(version[0], version[1], version[2]))
click.echo('Password protection ' +
('enabled' if controller.locked else 'disabled'))
keys = ctx.obj['settings'].get('keys', {})
if controller.locked and controller.id in keys:
click.echo('The password for this YubiKey is remembered by ykman.')
if ctx.obj['dev'].is_fips:
click.echo('FIPS Approved Mode: {}'.format(
'Yes' if controller.is_in_fips_mode else 'No'))
|
Display status of OATH application.
|
def export_for_training(self, file_path='./export.json'):
import json
export = {'conversations': self._generate_export_data()}
with open(file_path, 'w+') as jsonfile:
json.dump(export, jsonfile, ensure_ascii=False)
|
Create a file from the database that can be used to
train other chat bots.
|
def subtract(self,range2):
outranges = []
if self.chr != range2.chr:
outranges.append(self.copy())
return outranges
if not self.overlaps(range2):
outranges.append(self.copy())
return outranges
if range2.start <= self.start and range2.end >= self.end:
return outranges
if range2.start > self.start:
nrng = type(self)(self.chr,self.start+self._start_offset,range2.start-1,self.payload,self.dir)
outranges.append(nrng)
if range2.end < self.end:
nrng = type(self)(self.chr,range2.end+1+self._start_offset,self.end,self.payload,self.dir)
outranges.append(nrng)
return outranges
|
Take another range and return the list of ranges left after removing range2, keeping options from self.
:param range2:
:type range2: GenomicRange
:return: List of Genomic Ranges
:rtype: GenomicRange[]
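For example, ignoring the internal start offset, subtracting chr1:5-10 from chr1:1-20 leaves two pieces, chr1:1-4 and chr1:11-20; subtracting a range that fully covers self returns an empty list.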
|
async def close(self, *, force_after=30):
if self.transport:
self.transport.close()
try:
async with timeout_after(force_after):
await self.closed_event.wait()
except TaskTimeout:
self.abort()
await self.closed_event.wait()
|
Close the connection and return when closed.
|
def validate_contents(file_contents):
for name, contents in file_contents.items():
if os.path.splitext(name)[1] != '.ipynb':
continue
if not contents:
return False
try:
json_object = json.loads(contents)
except ValueError:
return False
return True
|
Ensures that all ipynb files in FILE_CONTENTS
are valid JSON files.
|
def stop_actions(self):
self._stop_actions, value = self.get_attr_set(self._stop_actions, 'stop_actions')
return value
|
Gets a list of stop actions. Valid values are `coast`
and `brake`.
|
def _gap(src_interval, tar_interval):
assert src_interval.bits == tar_interval.bits, "Number of bits should be same for operands"
s = src_interval
t = tar_interval
(_, b) = (s.lower_bound, s.upper_bound)
(c, _) = (t.lower_bound, t.upper_bound)
w = s.bits
if (not t._surrounds_member(b)) and (not s._surrounds_member(c)):
return StridedInterval(lower_bound=c, upper_bound=b, bits=w, stride=1).complement
return StridedInterval.empty(w)
|
Refer to section 3.1; the gap function.
:param src_interval: first argument or interval 1
:param tar_interval: second argument or interval 2
:return: Interval representing gap between two intervals
|
def calc_across_paths_textnodes(paths_nodes, dbg=False):
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
ttl = sum([len(s) for s in paths_nodes[1][0]])
path_nodes[1][1] = cnt
path_nodes[1][2] = ttl
path_nodes[1][3] = ttl/ cnt
if dbg:
print(path_nodes[1])
|
Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
|
def group(text, size):
if size <= 0:
raise ValueError("n must be a positive integer")
return [text[i:i + size] for i in range(0, len(text), size)]
|
Group ``text`` into blocks of ``size``.
Example:
>>> group("test", 2)
['te', 'st']
Args:
text (str): text to separate
size (int): size of groups to split the text into
Returns:
List of n-sized groups of text
Raises:
ValueError: If n is non positive
|
def destroy(self):
if self._running is False:
return
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
|
Tear down the minion
|
def pint_multiply(da, q, out_units=None):
a = 1 * units2pint(da)
f = a * q.to_base_units()
if out_units:
f = f.to(out_units)
out = da * f.magnitude
out.attrs['units'] = pint2cfunits(f.units)
return out
|
Multiply xarray.DataArray by pint.Quantity.
Parameters
----------
da : xr.DataArray
Input array.
q : pint.Quantity
Multiplicative factor.
out_units : str
Units the output array should be converted into.
|
def _to_dict(self):
physical_prop_names = find_PhysicalProperty(self)
physical_prop_vals = [getattr(self, prop) for prop in physical_prop_names]
return dict(zip(physical_prop_names, physical_prop_vals))
|
Return a dictionary representation of the current object.
|
def category(**kwargs):
if 'series' in kwargs:
kwargs.pop('series')
path = 'series'
else:
path = None
return Fred().category(path, **kwargs)
|
Get a category.
|
def add_dicts(*args):
counters = [Counter(arg) for arg in args]
return dict(reduce(operator.add, counters))
|
Adds two or more dicts together. Common keys will have their values added.
For example::
>>> t1 = {'a':1, 'b':2}
>>> t2 = {'b':1, 'c':3}
>>> t3 = {'d':4}
>>> add_dicts(t1, t2, t3)
{'a': 1, 'c': 3, 'b': 3, 'd': 4}
|
def rename_dimension(self, old_name, new_name):
if old_name not in self.dimension_names:
raise ValueError("Shape %s does not have dimension named %s"
% (self, old_name))
return Shape(
[Dimension(new_name, d.size) if d.name == old_name else d
for d in self.dims])
|
Returns a copy where one dimension is renamed.
|
def _get_dst_resolution(self, dst_res=None):
if dst_res is None:
dst_res = min(self._res_indices.keys())
return dst_res
|
Get default resolution, i.e. the highest resolution or smallest cell size.
|
def get_instance(self, payload):
return EventInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
|
Build an instance of EventInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.event.EventInstance
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventInstance
|
def _find_unchanged(old, new):
edges = []
old_edges = [set(edge) for edge in old.edges()]
new_edges = [set(edge) for edge in new.edges()]
for old_edge in old_edges:
if old_edge in new_edges:
edges.append(set(old_edge))
return edges
|
Returns the edges that are present in both the old and new graphs.
|
def hazard_extra_keyword(keyword, feature, parent):
_ = feature, parent
hazard_layer_path = QgsExpressionContextUtils. \
projectScope(QgsProject.instance()).variable(
'hazard_layer')
hazard_layer = load_layer(hazard_layer_path)[0]
keywords = KeywordIO.read_keywords(hazard_layer)
extra_keywords = keywords.get('extra_keywords')
if extra_keywords:
value = extra_keywords.get(keyword)
if value:
value_definition = definition(value)
if value_definition:
return value_definition['name']
return value
else:
return tr('Keyword %s is not found' % keyword)
return tr('No extra keywords found')
|
Given a keyword, it will return the value of the keyword
from the hazard layer's extra keywords.
For instance:
* hazard_extra_keyword( 'depth' ) -> will return the value of 'depth'
in current hazard layer's extra keywords.
|
def parse(text, showToc=True):
p = Parser(show_toc=showToc)
return p.parse(text)
|
Returns HTML from MediaWiki markup
|
def handle_request(self, environ, start_response):
urls = self.url_map.bind_to_environ(environ)
try:
endpoint, args = urls.match()
environ['pywb.app_prefix'] = environ.get('SCRIPT_NAME')
response = endpoint(environ, **args)
return response(environ, start_response)
except HTTPException as e:
redir = self._check_refer_redirect(environ)
if redir:
return redir(environ, start_response)
return e(environ, start_response)
except Exception as e:
if self.debug:
traceback.print_exc()
response = self.rewriterapp._error_response(environ, 'Internal Error: ' + str(e), '500 Server Error')
return response(environ, start_response)
|
Retrieves the route handler and calls it, returning its response.
:param dict environ: The WSGI environment dictionary for the request
:param start_response:
:return: The WbResponse for the request
:rtype: WbResponse
|
def getLiteral(self):
chars = u''
c = self.current()
while True:
if c and c == u"\\":
c = self.next()
if c:
chars += c
continue
elif not c or (c in self.meta_chars):
break
else:
chars += c
if self.lookahead() and self.lookahead() in self.meta_chars:
break
c = self.next()
return StringGenerator.Literal(chars)
|
Get a sequence of non-special characters.
|
def dump(self, name: str, inst):
"Save the object instance to the stash."
self.stash.dump(name, inst)
|
Save the object instance to the stash.
|
def u8(self, name, value=None, align=None):
self.uint(1, name, value, align)
|
Add an unsigned 1 byte integer field to template.
This is a convenience method that simply calls ``uint`` with a predefined length.
|
def filter_thumbnail_files(chan_path, filenames, metadata_provider):
thumbnail_files_to_skip = metadata_provider.get_thumbnail_paths()
filenames_cleaned = []
for filename in filenames:
keep = True
chan_filepath = os.path.join(chan_path, filename)
chan_filepath_tuple = path_to_tuple(chan_filepath)
if chan_filepath_tuple in thumbnail_files_to_skip:
keep = False
if keep:
filenames_cleaned.append(filename)
return filenames_cleaned
|
We don't want to create `ContentNode` from thumbnail files.
|
def deprecated_attr(namespace, attr, replacement):
_deprecated_attrs.setdefault(namespace, []).append((attr, replacement))
|
Marks a module level attribute as deprecated. Accessing it will emit
a PyGIDeprecationWarning warning.
e.g. for ``deprecated_attr("GObject", "STATUS_FOO", "GLib.Status.FOO")``
accessing GObject.STATUS_FOO will emit:
"GObject.STATUS_FOO is deprecated; use GLib.Status.FOO instead"
:param str namespace:
The namespace of the override this is called in.
:param str attr:
The attribute name (which gets added to __all__).
:param str replacement:
The replacement text which will be included in the warning.
|
def debug_sql(sql: str, *args: Any) -> None:
log.debug("SQL: %s" % sql)
if args:
log.debug("Args: %r" % args)
|
Writes SQL and arguments to the log.
|
def imagenet50(display=False, resolution=224):
prefix = github_data_url + "imagenet50_"
X = np.load(cache(prefix + "%sx%s.npy" % (resolution, resolution))).astype(np.float32)
y = np.loadtxt(cache(prefix + "labels.csv"))
return X, y
|
This is a set of 50 images representative of ImageNet images.
This dataset was collected by randomly finding a working ImageNet link and then pasting the
original ImageNet image into Google image search restricted to images licensed for reuse. A
similar image (now with rights to reuse) was downloaded as a rough replacement for the original
ImageNet image. The point is to have a random sample of ImageNet for use as a background
distribution for explaining models trained on ImageNet data.
Note that because the images are only rough replacements the labels might no longer be correct.
|
def GetFormatterObject(cls, data_type):
data_type = data_type.lower()
if data_type not in cls._formatter_objects:
formatter_object = None
if data_type in cls._formatter_classes:
formatter_class = cls._formatter_classes[data_type]
formatter_object = formatter_class()
if not formatter_object:
logger.warning(
'Using default formatter for data type: {0:s}'.format(data_type))
formatter_object = default.DefaultFormatter()
cls._formatter_objects[data_type] = formatter_object
return cls._formatter_objects[data_type]
|
Retrieves the formatter object for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding formatter or the default formatter if
not available.
|
def jenkins_rao(target, throat_perimeter='throat.perimeter',
throat_area='throat.area',
throat_diameter='throat.indiameter'):
P = target[throat_perimeter]
A = target[throat_area]
r = target[throat_diameter]/2
value = (P/A)/(2/r)
return value
|
r"""
Jenkins and Rao relate the capillary pressure in an eliptical throat to
the aspect ratio
References
----------
Jenkins, R.G. and Rao, M.B., The effect of elliptical pores on
mercury porosimetry results. Powder technology, 38(2), pp.177-180. (1984)
|
def _get_status_descr_by_id(status_id):
for status_name, status_data in six.iteritems(LINODE_STATUS):
if status_data['code'] == int(status_id):
return status_data['descr']
return LINODE_STATUS.get(status_id, None)
|
Return linode status by ID
status_id
linode VM status ID
|
def info(self):
info = []
for prior_model_name, prior_model in self.prior_model_tuples:
info.append(prior_model.name + '\n')
info.extend([f"{prior_model_name}_{item}" for item in prior_model.info])
return '\n'.join(info)
|
Use the priors that make up the model_mapper to generate information on each parameter of the overall model.
This information is extracted from each prior's *model_info* property.
|
def attribute_exists(self, name):
if name in self:
if issubclass(self[name].__class__, Attribute):
return True
return False
|
Returns if given attribute exists in the node.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.attribute_exists("attributeA")
True
>>> node_a.attribute_exists("attributeC")
False
:param name: Attribute name.
:type name: unicode
:return: Attribute exists.
:rtype: bool
|
def _get_node(self, node_id):
self.non_terminated_nodes({})
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
matches = list(self.ec2.instances.filter(InstanceIds=[node_id]))
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0]
|
Refresh and get info for this node, updating the cache.
|
def find_specs(self, directory):
specs = []
spec_files = self.file_finder.find(directory)
for spec_file in spec_files:
specs.extend(self.spec_finder.find(spec_file.module))
return specs
|
Finds all specs in a given directory. Returns a list of
Example and ExampleGroup instances.
|
def interface_direct_class(data_class):
if data_class in ASSET:
interface = AssetsInterface()
elif data_class in PARTY:
interface = PartiesInterface()
elif data_class in BOOK:
interface = BooksInterface()
elif data_class in CORPORATE_ACTION:
interface = CorporateActionsInterface()
elif data_class in MARKET_DATA:
interface = MarketDataInterface()
elif data_class in TRANSACTION:
interface = TransactionsInterface()
else:
interface = AssetManagersInterface()
return interface
|
Helper to direct to the correct interface for interacting with the DB, based on the data class alone.
|
def get_xyz_2d(self, xcoord, x, ycoord, y, u, v):
xy = xcoord.values.ravel() + 1j * ycoord.values.ravel()
dist = np.abs(xy - (x + 1j * y))
imin = np.nanargmin(dist)
xy_min = xy[imin]
return (xy_min.real, xy_min.imag, u.values.ravel()[imin],
v.values.ravel()[imin])
|
Get closest x, y and z for the given `x` and `y` in `data` for
2d coords
|
def write_xml(self, xmlfile, config=None):
root = ElementTree.Element('source_library')
root.set('title', 'source_library')
for s in self._srcs:
s.write_xml(root)
if config is not None:
srcs = self.create_diffuse_srcs(config)
diffuse_srcs = {s.name: s for s in srcs}
for s in self._diffuse_srcs:
src = copy.deepcopy(diffuse_srcs.get(s.name, s))
src.update_spectral_pars(s.spectral_pars)
src.write_xml(root)
else:
for s in self._diffuse_srcs:
s.write_xml(root)
output_file = open(xmlfile, 'w')
output_file.write(utils.prettify_xml(root))
|
Save the ROI model as an XML file.
|
def change_to_workdir(self):
logger.info("Changing working directory to: %s", self.workdir)
self.check_dir(self.workdir)
try:
os.chdir(self.workdir)
except OSError as exp:
self.exit_on_error("Error changing to working directory: %s. Error: %s. "
"Check the existence of %s and the %s/%s account "
"permissions on this directory."
% (self.workdir, str(exp), self.workdir, self.user, self.group),
exit_code=3)
self.pre_log.append(("INFO", "Using working directory: %s" % os.path.abspath(self.workdir)))
|
Change the working directory to the ``workdir`` attribute.
:return: None
|
def convert_html_to_xml(self):
if hasattr(self, 'content') and self.content != '':
regex = r'<(?!/)(?!!)'
xml_content = re.sub(regex, '<xhtml:', self.content)
return xml_content
else:
return ''
|
Parses the HTML text and converts its tags to XML-valid tags.
:returns: HTML enabled text in a XML valid format.
:rtype: str
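To illustrate the substitution (a doctest-style sketch; only opening tags are prefixed, since the negative lookaheads skip ``</...>`` and ``<!...>``):
>>> import re
>>> re.sub(r'<(?!/)(?!!)', '<xhtml:', '<p>Hello <b>world</b></p>')
'<xhtml:p>Hello <xhtml:b>world</b></p>'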
|
def save(self, fname: str):
mx.nd.save(fname, self.source + self.target + self.label)
|
Saves the dataset to a binary .npy file.
|
def get_local_roles_for(brain_or_object, user=None):
user_id = get_user_id(user)
obj = api.get_object(brain_or_object)
return sorted(obj.get_local_roles_for_userid(user_id))
|
Get the locally defined roles on the context
Code extracted from `IRoleManager.get_local_roles_for_userid`
:param brain_or_object: Catalog brain or object
:param user: A user ID, user object or None (for the current user)
:returns: List of granted local roles on the given object
|
def maybe_transactional(func):
@wraps(func)
def wrapper(*args, **kwargs):
commit = kwargs.get("commit", True)
with transaction(commit=commit):
return func(*args, **kwargs)
return wrapper
|
Variant of `transactional` that will not commit if there's an argument `commit` with a falsey value.
Useful for dry-run style operations.
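A hedged usage sketch (the function name and its body are hypothetical; only the handling of the ``commit`` keyword mirrors the wrapper above):
@maybe_transactional
def import_records(records, commit=True):
    ...  # database writes happen inside the transaction context
import_records(data)                 # runs and commits
import_records(data, commit=False)   # dry run: executes without committing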
|
def random_integers(cls, size, seed=None):
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
return cls.from_sequence(size).hash(seed)
|
Returns an SArray with random integer values.
|
def _ord_to_str(ordinal, weights):
chars = []
for weight in weights:
if ordinal == 0:
return "".join(chars)
ordinal -= 1
index, ordinal = divmod(ordinal, weight)
chars.append(_ALPHABET[index])
return "".join(chars)
|
Reverse function of _str_to_ord.
|
def _get_driver(driver_type):
if driver_type == 'phantomjs':
return webdriver.PhantomJS(service_log_path=os.path.devnull)
if driver_type == 'firefox':
return webdriver.Firefox(firefox_options=FIREFOXOPTIONS)
elif driver_type == 'chrome':
chrome_options = webdriver.ChromeOptions()
for arg in CHROME_WEBDRIVER_ARGS:
chrome_options.add_argument(arg)
return webdriver.Chrome(chrome_options=chrome_options)
else:
raise USPSError('{} not supported'.format(driver_type))
|
Get webdriver.
|
def show_form_for_method(self, view, method, request, obj):
if method not in view.allowed_methods:
return
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False
return True
|
Returns True if a form should be shown for this method.
|
def getUTC(self, utcoffset):
newTime = (self.value - utcoffset.value) % 24
return Time(newTime)
|
Returns a new Time object set to UTC given
an offset Time object.
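For example, if the local value is 2 (02:00) and the UTC offset is +5 hours, the result wraps past midnight: (2 - 5) % 24 = 21, i.e. 21:00 UTC.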
|
def custom_server_error(request, template_name='500.html', admin_template_name='500A.html'):
trace = None
if request.user.is_authenticated() and (request.user.is_staff or request.user.is_superuser):
try:
import traceback, sys
trace = traceback.format_exception(*(sys.exc_info()))
if not request.user.is_superuser and trace:
trace = trace[-1:]
trace = '\n'.join(trace)
except:
pass
if request.path.startswith('/%s' % admin.site.name):
template_name = admin_template_name
t = loader.get_template(template_name)
return http.HttpResponseServerError(t.render(Context({'trace': trace})))
|
500 error handler. Displays a full traceback for superusers and the first line of the
traceback for staff members.
Templates: `500.html` or `500A.html` (admin)
Context: trace
Holds the traceback information for debugging.
|
def _option(value):
if value in __opts__:
return __opts__[value]
master_opts = __pillar__.get('master', {})
if value in master_opts:
return master_opts[value]
if value in __pillar__:
return __pillar__[value]
|
Look up the value for an option: check __opts__ first, then the master opts stored in pillar, then __pillar__ itself.
|
def value(self, value):
try:
struct.pack('>' + conf.TYPE_CHAR, value)
except struct.error:
raise IllegalDataValueError
self._value = value
|
Value to be written on register.
:param value: An integer.
:raises: IllegalDataValueError when value isn't in range.
|
def visit_object(self, node):
if current_app.debug:
return tags.comment('no implementation in {} to render {}'.format(
self.__class__.__name__,
node.__class__.__name__, ))
return ''
|
Fallback rendering for objects.
If the current application is in debug-mode
(``flask.current_app.debug`` is ``True``), an ``<!-- HTML comment
-->`` will be rendered, indicating which class is missing a visitation
function.
Outside of debug-mode, returns an empty string.
|
def image_tasks(self):
uri = "/%s/tasks" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body
|
Returns a json-schema document that represents a container of task
entities.
|
def read(config_values):
if not config_values:
raise RheaError('Cannot read config_value: `{}`'.format(config_values))
config_values = to_list(config_values)
config = {}
for config_value in config_values:
config_value = ConfigSpec.get_from(value=config_value)
config_value.check_type()
config_results = config_value.read()
if config_results and isinstance(config_results, Mapping):
config = deep_update(config, config_results)
elif config_value.check_if_exists:
raise RheaError('Cannot read config_value: `{}`'.format(config_value))
return config
|
Reads an ordered list of configuration values and deep-merges the values in reverse order.
|
def proportional_weights(self, fraction_stdev=1.0, wmax=100.0,
leave_zero=True):
new_weights = []
for oval, ow in zip(self.observation_data.obsval,
self.observation_data.weight):
if leave_zero and ow == 0.0:
ow = 0.0
elif oval == 0.0:
ow = wmax
else:
nw = 1.0 / (np.abs(oval) * fraction_stdev)
ow = min(wmax, nw)
new_weights.append(ow)
self.observation_data.weight = new_weights
|
Set up weights inversely proportional to the observation value.
Parameters
----------
fraction_stdev : float
the fraction portion of the observation
val to treat as the standard deviation. set to 1.0 for
inversely proportional
wmax : float
maximum weight to allow
leave_zero : bool
flag to leave existing zero weights
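A worked example of the weighting: with fraction_stdev=1.0 and wmax=100.0, an observation value of 10.0 gets weight 1 / (|10.0| * 1.0) = 0.1, while a value of 0.005 would give 200.0 and is therefore capped at wmax=100.0; observations that already have zero weight keep it when leave_zero is True.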
|
def add_resources_to_registry():
from deform.widget import default_resource_registry
default_resource_registry.set_js_resources("jqueryui", None, None)
default_resource_registry.set_js_resources("datetimepicker", None, None)
default_resource_registry.set_js_resources("custom_dates", None, None)
default_resource_registry.set_js_resources(
"radio_choice_toggle", None, None
)
default_resource_registry.set_js_resources("checkbox_toggle", None, None)
from js.deform import resource_mapping
from js.select2 import select2
resource_mapping['select2'] = select2
from js.jquery_timepicker_addon import timepicker
resource_mapping['datetimepicker'] = timepicker
resource_mapping['custom_dates'] = custom_dates
resource_mapping['radio_choice_toggle'] = radio_choice_toggle
resource_mapping['checkbox_toggle'] = checkbox_toggle
|
Add resources to the deform registry
|
def _get_user(self, user: Union[User, str]) -> User:
user_id: str = getattr(user, 'user_id', user)
discovery_room = self._global_rooms.get(
make_room_alias(self.network_id, DISCOVERY_DEFAULT_ROOM),
)
if discovery_room and user_id in discovery_room._members:
duser = discovery_room._members[user_id]
if getattr(user, 'displayname', None):
assert isinstance(user, User)
duser.displayname = user.displayname
user = duser
elif not isinstance(user, User):
user = self._client.get_user(user_id)
return user
|
Creates a User from a user_id, if needed, or fetches a cached User.
As all users are supposed to be in the discovery room, its members dict is used for caching.
|
def file_md5(f, size=8192):
"Calculates the MD5 of a file."
md5 = hashlib.md5()
while True:
data = f.read(size)
if not data:
break
md5.update(data)
return md5.hexdigest()
|
Calculates the MD5 of a file.
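A minimal usage sketch (the filename is hypothetical; the function expects an already-open binary file object):
>>> with open('archive.tar.gz', 'rb') as f:
...     digest = file_md5(f)
>>> len(digest)   # an MD5 hex digest is always 32 characters
32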
|
def add_output_variable(self, var):
assert(isinstance(var, Variable))
self.output_variable_list.append(var)
|
Adds the argument variable as one of the output variables.
|
def decode(dct, intype='json', raise_error=False):
for decoder in get_plugins('decoders').values():
if (set(list(decoder.dict_signature)).issubset(dct.keys())
and hasattr(decoder, 'from_{}'.format(intype))
and getattr(decoder, 'allow_other_keys', False)):
return getattr(decoder, 'from_{}'.format(intype))(dct)
break
elif (sorted(list(decoder.dict_signature)) == sorted(dct.keys())
and hasattr(decoder, 'from_{}'.format(intype))):
return getattr(decoder, 'from_{}'.format(intype))(dct)
break
if raise_error:
raise ValueError('no suitable plugin found for: {}'.format(dct))
else:
return dct
|
Decode dict objects, via decoder plugins, to a new type.
Parameters
----------
intype: str
use decoder method from_<intype> to encode
raise_error : bool
if True, raise ValueError if no suitable plugin found
Examples
--------
>>> load_builtin_plugins('decoders')
[]
>>> from decimal import Decimal
>>> decode({'_python_Decimal_':'1.3425345'})
Decimal('1.3425345')
>>> unload_all_plugins()
|
def find_sink_variables(self):
is_sink = {name: True for name in self.variables.keys()}
for operator in self.operators.values():
for variable in operator.inputs:
is_sink[variable.onnx_name] = False
return [variable for name, variable in self.variables.items() if is_sink[name]]
|
Find sink variables in this scope, i.e. variables that are not consumed as an input by any operator.
|
def uploads(self):
if self.syncEnabled == True:
return Uploads(url=self._url + "/uploads",
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return None
|
Returns the class used to perform the upload function. It will
only return the Uploads class if syncEnabled is True.
|
def _decoder(image, shrink):
decoder = dmtxDecodeCreate(image, shrink)
if not decoder:
raise PyLibDMTXError('Could not create decoder')
else:
try:
yield decoder
finally:
dmtxDecodeDestroy(byref(decoder))
|
A context manager for `DmtxDecode`, created and destroyed by
`dmtxDecodeCreate` and `dmtxDecodeDestroy`.
Args:
image (POINTER(DmtxImage)):
shrink (int):
Yields:
POINTER(DmtxDecode): The created decoder
Raises:
PyLibDMTXError: If the decoder could not be created.
|
def _ParseSignatureIdentifiers(self, data_location, signature_identifiers):
if not signature_identifiers:
return
if not data_location:
raise ValueError('Missing data location.')
path = os.path.join(data_location, 'signatures.conf')
if not os.path.exists(path):
raise IOError(
'No such format specification file: {0:s}'.format(path))
try:
specification_store = self._ReadSpecificationFile(path)
except IOError as exception:
raise IOError((
'Unable to read format specification file: {0:s} with error: '
'{1!s}').format(path, exception))
signature_identifiers = signature_identifiers.lower()
signature_identifiers = [
identifier.strip() for identifier in signature_identifiers.split(',')]
file_entry_filter = file_entry_filters.SignaturesFileEntryFilter(
specification_store, signature_identifiers)
self._filter_collection.AddFilter(file_entry_filter)
|
Parses the signature identifiers.
Args:
data_location (str): location of the format specification file, for
example, "signatures.conf".
signature_identifiers (str): comma separated signature identifiers.
Raises:
IOError: if the format specification file could not be read from
the specified data location.
OSError: if the format specification file could not be read from
the specified data location.
ValueError: if no data location was specified.
|
def dumps(self, bucket=None):
return [
self.file_cls(o, self.filesmap.get(o.key, {})).dumps()
for o in sorted_files_from_bucket(bucket or self.bucket, self.keys)
]
|
Serialize files from a bucket.
:param bucket: Instance of files
:class:`invenio_files_rest.models.Bucket`. (Default:
``self.bucket``)
:returns: List of serialized files.
|
def centroid(X):
C = np.sum(X, axis=0) / len(X)
return C
|
Calculate the centroid from a matrix X
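Equivalent to the column-wise mean. A doctest-style check (assuming ``X`` is a 2-D numpy array of points):
>>> import numpy as np
>>> X = np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 3.0]])
>>> centroid(X)
array([1., 1.])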
|
def _compile_rules(self):
for state, table in self.RULES.items():
patterns = list()
actions = list()
nextstates = list()
for i, row in enumerate(table):
if len(row) == 2:
pattern, _action = row
nextstate = None
elif len(row) == 3:
pattern, _action, nextstate = row
else:
fstr = "invalid RULES: state {}, row {}"
raise CompileError(fstr.format(state, i))
patterns.append(pattern)
actions.append(_action)
nextstates.append(nextstate)
reobj = re.compile('|'.join("(" + p + ")" for p in patterns))
self._rules[state] = (reobj, actions, nextstates)
|
Compile the rules into the internal lexer state.
|
def _store_credentials(self, username, password, remember=False):
if username and password and remember:
CONF.set('main', 'report_error/username', username)
try:
keyring.set_password('github', username, password)
except Exception:
if self._show_msgbox:
QMessageBox.warning(self.parent_widget,
_('Failed to store password'),
_('It was not possible to securely '
'save your password. You will be '
'prompted for your Github '
'credentials next time you want '
'to report an issue.'))
remember = False
CONF.set('main', 'report_error/remember_me', remember)
|
Store credentials for future use.
|
def update(self, new_email_address, name, custom_fields, resubscribe, consent_to_track, restart_subscription_based_autoresponders=False):
validate_consent_to_track(consent_to_track)
params = {"email": self.email_address}
body = {
"EmailAddress": new_email_address,
"Name": name,
"CustomFields": custom_fields,
"Resubscribe": resubscribe,
"ConsentToTrack": consent_to_track,
"RestartSubscriptionBasedAutoresponders": restart_subscription_based_autoresponders}
response = self._put("/subscribers/%s.json" % self.list_id,
body=json.dumps(body), params=params)
self.email_address = new_email_address
|
Updates any aspect of a subscriber, including email address, name, and
custom field data if supplied.
|
def get_groups(self):
def process_result(result):
return [self.get_group(group) for group in result]
return Command('get', [ROOT_GROUPS], process_result=process_result)
|
Return the groups linked to the gateway.
Returns a Command.
|
def update_type_lookups(self):
self.type_to_typestring = dict(zip(self.types,
self.python_type_strings))
self.typestring_to_type = dict(zip(self.python_type_strings,
self.types))
|
Update type and typestring lookup dicts.
Must be called once the ``types`` and ``python_type_strings``
attributes are set so that ``type_to_typestring`` and
``typestring_to_type`` are constructed.
.. versionadded:: 0.2
Notes
-----
Subclasses need to call this function explicitly.
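For instance, with hypothetical values of the two attributes, the lookups are built roughly as:
>>> types = [int, float]
>>> python_type_strings = ['int', 'float']
>>> dict(zip(types, python_type_strings))
{<class 'int'>: 'int', <class 'float'>: 'float'}
>>> dict(zip(python_type_strings, types))
{'int': <class 'int'>, 'float': <class 'float'>}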
|
def _read_opt_ilnp(self, code, *, desc):
_type = self._read_opt_type(code)
_size = self._read_unpack(1)
_nval = self._read_fileng(_size)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
value=_nval,
)
return opt
|
Read HOPOPT ILNP Nonce option.
Structure of HOPOPT ILNP Nonce option [RFC 6744]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len | Option Type | Option Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ Nonce Value /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.ilnp.type Option Type
0 0 hopopt.ilnp.type.value Option Number
0 0 hopopt.ilnp.type.action Action (10)
0 2 hopopt.ilnp.type.change Change Flag (0)
1 8 hopopt.ilnp.length Length of Option Data
2 16 hopopt.ilnp.value Nonce Value
|
def normalize_json(template):
obj = parse_cloudformation_template(template)
json_str = json.dumps(
obj, sort_keys=True, indent=4, default=str, separators=(',', ': '),
)
result = []
lines = json_str.split("\n")
for line in lines:
result.append(line + "\n")
return result
|
Normalize our template for diffing.
Args:
template(str): string representing the template
Returns:
list: json representation of the parameters
|
def group_members(self, group_id, include_orphans=False):
params = {'includeOrphans': str(include_orphans).lower()}
url = self._service_url(['triggers', 'groups', group_id, 'members'], params=params)
return Trigger.list_to_object_list(self._get(url))
|
Find all group member trigger definitions
:param group_id: group trigger id
:param include_orphans: If True, include orphan members
:return: list of associated group members as trigger objects
|
def create_html(api_key, attrs):
gif = get_gif(api_key, attrs['gif_id'])
if 'alt' not in attrs.keys():
attrs['alt'] = 'source: {}'.format(gif['data']['source'])
html_out = '<a href="{}">'.format(gif['data']['url'])
html_out += '<img src="{}" alt="{}">'.format(
gif['data']['images']['original']['url'],
attrs['alt'])
html_out += '</a>'
return html_out
|
Returns complete html tag string.
|
def run_async(**kwargs):
r = init_runner(**kwargs)
runner_thread = threading.Thread(target=r.run)
runner_thread.start()
return runner_thread, r
|
Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.run`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
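A hedged usage sketch (the private data dir and playbook name are hypothetical; keyword arguments are forwarded to :py:func:`ansible_runner.interface.run`):
thread, runner = run_async(private_data_dir='/tmp/demo', playbook='site.yml')
thread.join()          # wait for the background run to finish
print(runner.status)   # e.g. 'successful' or 'failed'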
|
def transceive(self, data, timeout=None):
log.debug(">> {0}".format(hexlify(data)))
data = self._dep.exchange(data, timeout)
log.debug("<< {0}".format(hexlify(data) if data else "None"))
return data
|
Transmit arbitrary data and receive the response.
This is a low level method to send arbitrary data to the
tag. While it should almost always be better to use
:meth:`send_apdu` this is the only way to force a specific
timeout value (which is otherwise derived from the Tag's
answer to select). The *timeout* value is expected as a float
specifying the seconds to wait.
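A minimal usage sketch (the ``tag`` instance and payload bytes are hypothetical; ``timeout`` is in seconds):
response = tag.transceive(b'\x00\xa4\x04\x00', timeout=0.5)   # wait at most 0.5 s for the tag's answer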
|
def convert_to_mb(s):
s = s.upper()
try:
if s.endswith('G'):
return float(s[:-1].strip()) * 1024
elif s.endswith('T'):
return float(s[:-1].strip()) * 1024 * 1024
else:
return float(s[:-1].strip())
except (IndexError, ValueError, KeyError, TypeError):
errmsg = ("Invalid memory format: %s") % s
raise exception.SDKInternalError(msg=errmsg)
|
Convert a memory size string (e.g. ``'4G'`` or ``'1T'``) to MB.
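A couple of illustrative conversions, following the branches above:
>>> convert_to_mb('4G')
4096.0
>>> convert_to_mb('1T')
1048576.0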
|