def earth_rates(ATTITUDE):
    from math import sin, cos, tan, fabs
    p = ATTITUDE.rollspeed
    q = ATTITUDE.pitchspeed
    r = ATTITUDE.yawspeed
    phi = ATTITUDE.roll
    theta = ATTITUDE.pitch
    psi = ATTITUDE.yaw
    phiDot = p + tan(theta)*(q*sin(phi) + r*cos(phi))
    thetaDot = q*cos(phi) - r*sin(phi)
    if fabs(cos(theta)) < 1.0e-20:
        theta += 1.0e-10
    psiDot = (q*sin(phi) + r*cos(phi))/cos(theta)
    return (phiDot, thetaDot, psiDot)
return angular velocities in earth frame
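A minimal usage sketch (the attitude values are hypothetical; any object exposing roll/pitch/yaw plus the three body rates in radians works):

from types import SimpleNamespace

att = SimpleNamespace(roll=0.17, pitch=0.05, yaw=1.2,
                      rollspeed=0.01, pitchspeed=0.02, yawspeed=0.005)
phi_dot, theta_dot, psi_dot = earth_rates(att)  # Euler angle rates in the earth frame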
def add_index_operation(self, name, operations):
    if name not in self._index_operations:
        self._add_io(name, operations)
    else:
        raise AttributeError("An index operation with the name {} was already taken".format(name))
Add an index operation with the given name for the operations given.

Raises: AttributeError if an index operation with that name already exists.
def render_django_response(self, **kwargs):
    from django.http import HttpResponse
    return HttpResponse(
        self.render(**kwargs), content_type='image/svg+xml'
    )
Render the graph, and return a Django response
def _encode_uuid(name, value, dummy, opts):
    uuid_representation = opts.uuid_representation
    if uuid_representation == OLD_UUID_SUBTYPE:
        return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes
    elif uuid_representation == JAVA_LEGACY:
        from_uuid = value.bytes
        data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
        return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data
    elif uuid_representation == CSHARP_LEGACY:
        return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le
    else:
        return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes
Encode uuid.UUID.
def patch(*args, **kwargs):
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    if ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify both "coor" and "conn"')

    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)

    kwargs.setdefault('edgecolor', 'k')
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn, :]))
        args = tuple(poly, *args)

    p = PatchCollection(args, **kwargs)

    if cindex is not None:
        p.set_array(cindex)

    axis.add_collection(p)

    if autoscale:
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        axis.set_xlim([xlim[0] - .1*(xlim[1]-xlim[0]), xlim[1] + .1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0] - .1*(ylim[1]-ylim[0]), ylim[1] + .1*(ylim[1]-ylim[0])])

    return p
Add patches to plot. The color of the patches is indexed according to a specified color-index.

:example:

  Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
  configuration for which the elements get a color e.g. based on stress::

    import matplotlib.pyplot as plt
    import goosempl as gplt

    fig,ax = plt.subplots()

    p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
    _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

    cbar = fig.colorbar(p,axis=ax,aspect=10)

    plt.show()

:arguments - option 1/2:

  **patches** (``<list>``)
    List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

:arguments - option 2/2:

  **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the coordinates (positions) of each node.

  **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).

:options:

  **cindex** (``<numpy.ndarray>``)
    Array with, for each patch, the value that should be indexed to a color.

  **axis** (``<matplotlib>``)
    Specify an axis to include to plot in. By default the current axis is used.

  **autoscale** ([``True``] | ``False``)
    Automatically update the limits of the plot (currently automatic limits of Collections are
    not supported by matplotlib).

:recommended options:

  **cmap** (``<str>`` | ...)
    Specify a colormap.

  **linewidth** (``<float>``)
    Width of the edges.

  **edgecolor** (``<str>`` | ...)
    Color of the edges.

  **clim** (``(<float>,<float>)``)
    Lower and upper limit of the color-axis.

:returns:

  **handle** (``<matplotlib>``)
    Handle of the patch objects.

.. seealso::

  * `matplotlib example <http://matplotlib.org/examples/api/patch_collection.html>`_.
def get_asset_temporal_session(self):
    if not self.supports_asset_temporal():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise
    try:
        session = sessions.AssetTemporalSession(proxy=self._proxy,
                                                runtime=self._runtime)
    except AttributeError:
        raise
    return session
Gets the session for retrieving temporal coverage of an asset.

return: (osid.repository.AssetTemporalSession) - an AssetTemporalSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_temporal() is false
compliance: optional - This method must be implemented if supports_asset_temporal() is true.
def _read(self, directory, filename, session, path, name, extension, spatial,
          spatialReferenceID, replaceParamFile):
    self.fileExtension = extension
    with open(path, 'r') as f:
        for line in f:
            sline = line.strip().split()
            if len(sline) == 1:
                self.numParameters = sline[0]
            else:
                target = TargetParameter(targetVariable=sline[0],
                                         varFormat=sline[1])
                target.replaceParamFile = self
Replace Param File Read from File Method
def _value_with_fmt(self, val):
    fmt = None
    if is_integer(val):
        val = int(val)
    elif is_float(val):
        val = float(val)
    elif is_bool(val):
        val = bool(val)
    elif isinstance(val, datetime):
        fmt = self.datetime_format
    elif isinstance(val, date):
        fmt = self.date_format
    elif isinstance(val, timedelta):
        val = val.total_seconds() / float(86400)
        fmt = '0'
    else:
        val = compat.to_str(val)
    return val, fmt
Convert numpy types to Python types for the Excel writers.

Parameters
----------
val : object
    Value to be written into cells

Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
def _spill(self):
    global MemoryBytesSpilled, DiskBytesSpilled
    if self._file is None:
        self._open_file()
    used_memory = get_used_memory()
    pos = self._file.tell()
    self._ser.dump_stream(self.values, self._file)
    self.values = []
    gc.collect()
    DiskBytesSpilled += self._file.tell() - pos
    MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
dump the values into disk
def serialize_smarttag(ctx, document, el, root):
    "Serializes smarttag."
    if ctx.options['smarttag_span']:
        _span = etree.SubElement(root, 'span', {'class': 'smarttag',
                                                'data-smarttag-element': el.element})
    else:
        _span = root
    for elem in el.elements:
        _ser = ctx.get_serializer(elem)
        if _ser:
            _td = _ser(ctx, document, elem, _span)
        else:
            if isinstance(elem, doc.Text):
                children = list(_span)
                if len(children) == 0:
                    _text = _span.text or u''
                    _span.text = u'{}{}'.format(_text, elem.text)
                else:
                    _text = children[-1].tail or u''
                    children[-1].tail = u'{}{}'.format(_text, elem.text)
    fire_hooks(ctx, document, el, _span, ctx.get_hook('smarttag'))
    return root
Serializes smarttag.
def focus_next_matching(self, querystring):
    self.focus_property(lambda x: x._message.matches(querystring),
                        self._tree.next_position)
focus next matching message in depth first order
def service(self):
    self.payload, self.payload_peer_address = \
        self.datagram_socket.recvfrom(UDP_MAX_DGRAM_LENGTH)
    _logger.debug("Received datagram from peer: %s", self.payload_peer_address)
    if not self.payload:
        self.payload_peer_address = None
        return
    if self.connections.has_key(self.payload_peer_address):
        self.forward()
    else:
        return self.payload_peer_address
Service the root socket

Read from the root socket and forward one datagram to a connection. The call
will return without forwarding data if any of the following occurs:

  * An error is encountered while reading from the root socket
  * Reading from the root socket times out
  * The root socket is non-blocking and has no data available
  * An empty payload is received
  * A non-empty payload is received from an unknown peer (a peer for which
    get_connection has not yet been called); in this case, the payload is
    held by this instance and will be forwarded when the forward method is
    called

Return: if the datagram received was from a new peer, then the peer's
address; otherwise None
def remote_method(self, method, block=False, **params):
    def func(*args):
        call = self.call(method, *args, **params)
        if block:
            return call.result()
        return call
    func.__name__ = method
    return func
Creates a Python function that will attempt to call a remote method when used.

:param method: str, Method name on the server to call
:param block: bool, Whether to wait for result or not

Usage::

    >>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
    >>> send_usher_token("some token")
    'Token Accepted'
def search(cls, query_string, options=None, enable_facet_discovery=False,
           return_facets=None, facet_options=None, facet_refinements=None,
           deadline=None, **kwargs):
    search_class = cls.search_get_class_names()[-1]
    query_string += ' ' + 'class_name:%s' % (search_class,)
    q = search.Query(
        query_string=query_string,
        options=options,
        enable_facet_discovery=enable_facet_discovery,
        return_facets=return_facets,
        facet_options=facet_options,
        facet_refinements=facet_refinements
    )
    index = cls.search_get_index()
    return index.search(q, deadline=deadline, **kwargs)
Searches the index. Conveniently searches only for documents that belong to
instances of this class.

:param query_string: The query to match against documents in the index. See
    search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
:param enable_facet_discovery: Discover the top relevant facets for this
    search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name
    to return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
    token strings used to filter out search results based on a facet value.
    Refinements for different facets will be conjunction and refinements for
    the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
:param kwargs: Additional keyword arguments passed on to the index search call.
:return: A SearchResults containing a list of documents matched, number
    returned and number matched by the query.
:raises:
    QueryError: If the query string is not parseable.
    TypeError: If any of the parameters have invalid types, or an unknown
        attribute is passed.
    ValueError: If any of the parameters have invalid values (e.g., a
        negative deadline).
def download_next_song_cache(self):
    if len(self.queue) == 0:
        return
    cache_ydl_opts = dict(ydl_opts)
    cache_ydl_opts["outtmpl"] = self.output_format_next
    with youtube_dl.YoutubeDL(cache_ydl_opts) as ydl:
        try:
            url = self.queue[0][0]
            ydl.download([url])
        except:
            pass
Downloads the next song in the queue to the cache
def process_update(self, update: types.Update):
    yield 'update_id', update.update_id
    if update.message:
        yield 'update_type', 'message'
        yield from self.process_message(update.message)
    if update.edited_message:
        yield 'update_type', 'edited_message'
        yield from self.process_message(update.edited_message)
    if update.channel_post:
        yield 'update_type', 'channel_post'
        yield from self.process_message(update.channel_post)
    if update.edited_channel_post:
        yield 'update_type', 'edited_channel_post'
        yield from self.process_message(update.edited_channel_post)
    if update.inline_query:
        yield 'update_type', 'inline_query'
        yield from self.process_inline_query(update.inline_query)
    if update.chosen_inline_result:
        yield 'update_type', 'chosen_inline_result'
        yield from self.process_chosen_inline_result(update.chosen_inline_result)
    if update.callback_query:
        yield 'update_type', 'callback_query'
        yield from self.process_callback_query(update.callback_query)
    if update.shipping_query:
        yield 'update_type', 'shipping_query'
        yield from self.process_shipping_query(update.shipping_query)
    if update.pre_checkout_query:
        yield 'update_type', 'pre_checkout_query'
        yield from self.process_pre_checkout_query(update.pre_checkout_query)
Parse Update object

:param update:
:return:
def stream_filesystem_node(path, recursive=False, patterns='**',
                           chunk_size=default_chunk_size):
    is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
    if recursive or is_dir:
        return stream_directory(path, recursive, patterns, chunk_size)
    else:
        return stream_files(path, chunk_size)
Gets a buffered generator for streaming either files or directories.

Returns a buffered generator which encodes the file or directory at the given
path as :mimetype:`multipart/form-data` with the corresponding headers.

Parameters
----------
path : str
    The filepath of the directory or file to stream
recursive : bool
    Stream all content within the directory recursively?
patterns : str | list
    Single *glob* pattern or list of *glob* patterns and compiled regular
    expressions to match the names of the filepaths to keep
chunk_size : int
    Maximum size of each stream chunk
def guess_version(redeem_script):
    n = riemann.get_current_network_name()
    if 'sprout' in n:
        return 1
    if 'overwinter' in n:
        return 3
    if 'sapling' in n:
        return 4
    try:
        script_array = redeem_script.split()
        script_array.index('OP_CHECKSEQUENCEVERIFY')
        return 2
    except ValueError:
        return 1
str -> int

Bitcoin uses tx version 2 for nSequence signaling.
Zcash uses tx version 2 for joinsplits.

We want to signal nSequence if we're using OP_CSV.
Unless we're in zcash.
def build(dburl, sitedir, mode):
    if mode == 'force':
        amode = ['-a']
    else:
        amode = []
    oldcwd = os.getcwd()
    os.chdir(sitedir)
    db = StrictRedis.from_url(dburl)
    job = get_current_job(db)
    job.meta.update({'out': '', 'milestone': 0, 'total': 1, 'return': None,
                     'status': None})
    job.save()
    p = subprocess.Popen([executable, '-m', 'nikola', 'build'] + amode,
                         stderr=subprocess.PIPE)
    milestones = {
        'done!': 0,
        'render_posts': 0,
        'render_pages': 0,
        'generate_rss': 0,
        'render_indexes': 0,
        'sitemap': 0
    }
    out = []
    while p.poll() is None:
        nl = p.stderr.readline().decode('utf-8')
        for k in milestones:
            if k in nl:
                milestones[k] = 1
        out.append(nl)
        job.meta.update({'milestone': sum(milestones.values()),
                         'total': len(milestones), 'out': ''.join(out),
                         'return': None, 'status': None})
        job.save()
    out += p.stderr.readlines()
    out = ''.join(out)
    job.meta.update({'milestone': len(milestones), 'total': len(milestones),
                     'out': ''.join(out), 'return': p.returncode,
                     'status': p.returncode == 0})
    job.save()
    os.chdir(oldcwd)
    return p.returncode
Build a site.
def getElementText(self, node, preserve_ws=None):
    result = []
    for child in node.childNodes:
        nodetype = child.nodeType
        if nodetype == child.TEXT_NODE or \
           nodetype == child.CDATA_SECTION_NODE:
            result.append(child.nodeValue)
    value = join(result, '')
    if preserve_ws is None:
        value = strip(value)
    return value
Return the text value of an xml element node. Leading and trailing whitespace is stripped from the value unless the preserve_ws flag is passed with a true value.
def sync_time(self):
    now = time.localtime(time.time())
    self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday,
                           now.tm_wday + 1, now.tm_hour, now.tm_min,
                           now.tm_sec, 0))
    return now
Sets the time on the pyboard to match the time on the host.
def convert_to_namespace(file, output, keyword):
    resource = parse_bel_resource(file)
    write_namespace(
        namespace_keyword=(keyword or resource['AnnotationDefinition']['Keyword']),
        namespace_name=resource['AnnotationDefinition']['Keyword'],
        namespace_description=resource['AnnotationDefinition']['DescriptionString'],
        author_name='Charles Tapley Hoyt',
        namespace_domain=NAMESPACE_DOMAIN_OTHER,
        values=resource['Values'],
        citation_name=resource['Citation']['NameString'],
        file=output
    )
Convert an annotation file to a namespace file.
def exception_message(self) -> Union[str, None]:
    if self.has_error:
        exception_data = self._raw.get("exception", {})
        return exception_data.get("message")
    return None
On Lavalink V3, if there was an exception during a load or get tracks call this property will be populated with the error message. If there was no error this property will be ``None``.
def clean_account(self):
    account = self.cleaned_data['account']
    if not account:
        return
    if account.type != Account.TYPES.income:
        raise ValidationError('Account must be an income account')
    try:
        account.housemate
    except Housemate.DoesNotExist:
        pass
    else:
        raise ValidationError('Account already has a housemate')
    return account
Ensure this is an income account
def shutdown_executors(wait=True):
    return {k: shutdown_executor(k, wait) for k in list(_EXECUTORS.keys())}
Clean-up the resources of all initialized executors.

:param wait:
    If True then shutdown will not return until all running futures have
    finished executing and the resources used by the executors have been
    reclaimed.
:type wait: bool
:return: Shutdown pool executor.
:rtype: dict[str,dict]
async def receive_message_batch_async(self, max_batch_size=None,
                                      on_message_received=None, timeout=0):
    self._message_received_callback = on_message_received
    max_batch_size = max_batch_size or self._prefetch
    if max_batch_size > self._prefetch:
        raise ValueError(
            'Maximum batch size {} cannot be greater than the '
            'connection link credit: {}'.format(max_batch_size, self._prefetch))
    timeout = self._counter.get_current_ms() + int(timeout) if timeout else 0
    expired = False
    self._received_messages = self._received_messages or queue.Queue()
    await self.open_async()
    receiving = True
    batch = []
    while not self._received_messages.empty() and len(batch) < max_batch_size:
        batch.append(self._received_messages.get())
        self._received_messages.task_done()
    if len(batch) >= max_batch_size:
        return batch

    while receiving and not expired and len(batch) < max_batch_size:
        while receiving and self._received_messages.qsize() < max_batch_size:
            if timeout and self._counter.get_current_ms() > timeout:
                expired = True
                break
            before = self._received_messages.qsize()
            receiving = await self.do_work_async()
            received = self._received_messages.qsize() - before
            if self._received_messages.qsize() > 0 and received == 0:
                expired = True
                break
        while not self._received_messages.empty() and len(batch) < max_batch_size:
            batch.append(self._received_messages.get())
            self._received_messages.task_done()
    return batch
Receive a batch of messages asynchronously. This method will return as soon
as some messages are available rather than waiting to achieve a specific
batch size, and therefore the number of messages returned per call will vary
up to the maximum allowed.

If the receive client is configured with `auto_complete=True` then the
messages received in the batch returned by this function will already be
settled. Alternatively, if `auto_complete=False`, then each message will need
to be explicitly settled before it expires and is released.

:param max_batch_size: The maximum number of messages that can be returned in
    one call. This value cannot be larger than the prefetch value, and if not
    specified, the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive
    from the service. It takes a single argument, a ~uamqp.message.Message
    object.
:type on_message_received: callable[~uamqp.message.Message]
:param timeout: A timeout in milliseconds for which to wait to receive any
    messages. If no messages are received in this time, an empty list will be
    returned. If set to 0, the client will continue to wait until at least
    one message is received. The default is 0.
:type timeout: int
def declare_alias(self, name):
    def decorator(f):
        self._auto_register_function(f, name)
        return f
    return decorator
Insert a Python function into this Namespace with an explicitly-given name, but detect its argument count automatically.
def method_delegate(**methods):
    methods = {k.upper(): v for k, v in iteritems(methods)}
    if PY3:
        methods = {k.encode("utf-8"): v for k, v in iteritems(methods)}

    def render(request):
        renderer = methods.get(request.method)
        if renderer is None:
            return Response(code=405)
        return renderer(request)
    return render
Construct a renderer that delegates based on the request's HTTP method.
def remove_namespace(doc, namespace):
    ns = '{{{}}}'.format(namespace)
    nsl = len(ns)
    for elem in doc.getiterator():
        if elem.tag.startswith(ns):
            elem.tag = elem.tag[nsl:]
    return doc
Takes in a ElementTree object and namespace value. The length of that
namespace value is removed from all Element nodes within the document. This
effectively removes the namespace from that document.

:param doc: lxml.etree
:param namespace: Namespace that needs to be removed.
:return: Returns the source document with namespaces removed.
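A hedged usage sketch with the standard-library ElementTree (the namespace URI is made up; it assumes an ElementTree/lxml version that still provides getiterator(), which was removed from the stdlib in Python 3.9):

import xml.etree.ElementTree as ET

doc = ET.fromstring('<a xmlns="http://example.com/ns"><b/></a>')
# tags start out as '{http://example.com/ns}a', '{http://example.com/ns}b'
cleaned = remove_namespace(doc, 'http://example.com/ns')
print(cleaned.tag, [e.tag for e in cleaned])  # a ['b']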
def list(self, **kwargs):
    resp = self.client.api.volumes(**kwargs)
    if not resp.get('Volumes'):
        return []
    return [self.prepare_model(obj) for obj in resp['Volumes']]
List volumes. Similar to the ``docker volume ls`` command.

Args:
    filters (dict): Server-side list filtering options.

Returns:
    (list of :py:class:`Volume`): The volumes.

Raises:
    :py:class:`docker.errors.APIError`
        If the server returns an error.
def parse_response(resp):
    host_id, msg, code = None, None, None
    try:
        content = resp.content
        root = ET.fromstring(content)
        code = root.find('./Code').text
        msg = root.find('./Message').text
        request_id = root.find('./RequestId').text
        host_id = root.find('./HostId').text
    except ETParseError:
        request_id = resp.headers.get('x-odps-request-id', None)
        if len(resp.content) > 0:
            obj = json.loads(resp.text)
            msg = obj['Message']
            code = obj.get('Code')
            host_id = obj.get('HostId')
            if request_id is None:
                request_id = obj.get('RequestId')
        else:
            return
    clz = globals().get(code, ODPSError)
    return clz(msg, request_id=request_id, code=code, host_id=host_id)
Parses the content of response and returns an exception object.
def _build_netengine_arguments(self):
    arguments = {"host": self.host}
    if self.config is not None:
        for key, value in self.config.iteritems():
            arguments[key] = value
    if self.port:
        arguments["port"] = self.port
    return arguments
returns a python dictionary representing arguments that will be passed to a netengine backend for internal use only
def retract(self, jid, node, id_, *, notify=False):
    retract = pubsub_xso.Retract()
    retract.node = node
    item = pubsub_xso.Item()
    item.id_ = id_
    retract.item = item
    retract.notify = notify
    iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.SET)
    iq.payload = pubsub_xso.Request(
        retract
    )
    yield from self.client.send(iq)
Retract a previously published item from a node.

:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to send a notify from.
:type node: :class:`str`
:param id_: The ID of the item to retract.
:type id_: :class:`str`
:param notify: Flag indicating whether subscribers shall be notified about
    the retraction.
:type notify: :class:`bool`
:raises aioxmpp.errors.XMPPError: as returned by the service

Retract an item previously published to `node` at `jid`. `id_` must be the
ItemID of the item to retract.

If `notify` is set to true, notifications will be generated (by setting the
`notify` attribute on the retraction request).
def load_config_vars(target_config, source_config):
    for attr in dir(source_config):
        if attr.startswith('_'):
            continue
        val = getattr(source_config, attr)
        if val is not None:
            setattr(target_config, attr, val)
Loads all attributes from source config into target config

@type target_config: TestRunConfigManager
@param target_config: Config to dump variables into
@type source_config: TestRunConfigManager
@param source_config: The other config
@return: True
async def _thread_coro(self, *args):
    return await self._loop.run_in_executor(
        self._executor, self._function, *args)
Coroutine called by MapAsync. It wraps the call to run_in_executor so the
synchronous function runs in a thread.
def dot_v2(vec1, vec2):
    return vec1.x * vec2.x + vec1.y * vec2.y
Return the dot product of two vectors
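A worked example with a throwaway vector type (any object exposing .x and .y attributes works):

from collections import namedtuple

Vec = namedtuple('Vec', 'x y')
print(dot_v2(Vec(1, 2), Vec(3, 4)))  # 1*3 + 2*4 = 11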
def _get_last_node_for_prfx(self, node, key_prfx, seen_prfx):
    node_type = self._get_node_type(node)
    if node_type == NODE_TYPE_BLANK:
        return BLANK_NODE

    if node_type == NODE_TYPE_BRANCH:
        if not key_prfx:
            return node
        sub_node = self._decode_to_node(node[key_prfx[0]])
        seen_prfx.append(key_prfx[0])
        return self._get_last_node_for_prfx(sub_node, key_prfx[1:], seen_prfx)

    curr_key = key_nibbles_from_key_value_node(node)

    if node_type == NODE_TYPE_LEAF:
        if starts_with(curr_key, key_prfx):
            return node
        else:
            return BLANK_NODE

    if node_type == NODE_TYPE_EXTENSION:
        if len(key_prfx) > len(curr_key):
            if starts_with(key_prfx, curr_key):
                sub_node = self._get_inner_node_from_extension(node)
                seen_prfx.extend(curr_key)
                return self._get_last_node_for_prfx(sub_node,
                                                    key_prfx[len(curr_key):],
                                                    seen_prfx)
            else:
                return BLANK_NODE
        else:
            if starts_with(curr_key, key_prfx):
                return node
            else:
                return BLANK_NODE
get last node for the given prefix, also update `seen_prfx` to track the
path already traversed

:param node: node in form of list, or BLANK_NODE
:param key_prfx: prefix to look for
:param seen_prfx: prefix already seen, updates with each call
:return: BLANK_NODE if does not exist, otherwise value or hash
def get_recipe_env(self, arch, with_flags_in_cc=True):
    env = super(ScryptRecipe, self).get_recipe_env(arch, with_flags_in_cc)
    openssl_recipe = self.get_recipe('openssl', self.ctx)
    env['CFLAGS'] += openssl_recipe.include_flags(arch)
    env['LDFLAGS'] += ' -L{}'.format(self.ctx.get_libs_dir(arch.arch))
    env['LDFLAGS'] += ' -L{}'.format(self.ctx.libs_dir)
    env['LDFLAGS'] += openssl_recipe.link_dirs_flags(arch)
    env['LIBS'] = env.get('LIBS', '') + openssl_recipe.link_libs_flags()
    return env
Adds openssl recipe to include and library path.
def _save_cb(self, w):
    format = self.saved_type
    if format is None:
        return self.fv.show_error("Please save an image first.")

    filename = self.w.name.get_text().strip()
    if len(filename) == 0:
        return self.fv.show_error("Please set a name for saving the file")
    self.save_name = filename

    if not filename.lower().endswith('.' + format):
        filename = filename + '.' + format

    path = self.w.folder.get_text().strip()
    if path == '':
        path = filename
    else:
        self.save_path = path
        path = os.path.join(path, filename)

    self.fv.error_wrap(shutil.copyfile, self.tmpname, path)
This function is called when the user clicks the 'Save' button. We save the last taken shot to the folder and name specified.
def _selectStuff(self, verb='SELECT'):
    sqlResults = self._runQuery(verb, self._queryTarget)
    for row in sqlResults:
        yield self._massageData(row)
Return a generator which yields the massaged results of this query with a
particular SQL verb.

For an attribute query, massaged results are of the type of that attribute.
For an item query, they are items of the type the query is supposed to
return.

@param verb: a str containing the SQL verb to execute. This really must be
    some variant of 'SELECT', the only two currently implemented being
    'SELECT' and 'SELECT DISTINCT'.
def train_history(self, tid=None):
    def result2history(result):
        if isinstance(result["history"], list):
            return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i)
                              for i, hist in enumerate(result["history"])])
        else:
            return pd.DataFrame(result["history"]["loss"])

    if tid is None:
        tid = self.valid_tid()
    res = [result2history(t["result"]).assign(tid=t["tid"])
           for t in self.trials
           if t["tid"] in _listify(tid)]
    df = pd.concat(res)
    fold_name = ["fold"] if "fold" in df else []
    df = _put_first(df, ["tid"] + fold_name + ["epoch"])
    return df
Get train history as pd.DataFrame
def merge_ligolws(elem):
    ligolws = [child for child in elem.childNodes
               if child.tagName == ligolw.LIGO_LW.tagName]
    if ligolws:
        dest = ligolws.pop(0)
        for src in ligolws:
            map(dest.appendChild, src.childNodes)
            if src.parentNode is not None:
                src.parentNode.removeChild(src)
    return elem
Merge all LIGO_LW elements that are immediate children of elem by appending their children to the first.
def make_tar_stream(build_context, buffer):
    tf = tarfile.TarFile(fileobj=buffer, mode='w')
    for context_path, fileobj in build_context.items():
        if getattr(fileobj, 'localpath', None) is not None:
            tf.add(fileobj.localpath, arcname=context_path)
        else:
            tar_add_bytes(tf, context_path, fileobj.read('rb'))
    tf.close()
Write a tar stream of the build context to the provided buffer

Args:
    build_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping
        filenames to file references
    buffer (io.BytesIO): writable binary mode buffer
def fields(self):
    result = self._get_key_values('fields')
    for key, value in result.items():
        if not isinstance(value, list):
            result[key] = [value]

    for key, value in result.items():
        schema = get_schema_from_type(key)
        for obj in value:
            if obj not in schema._declared_fields:
                raise InvalidField("{} has no attribute {}".format(schema.__name__, obj))

    return result
Return fields wanted by client.

:return dict: a dict of sparse fieldsets information

Return value will be a dict containing all fields by resource, for example::

    {
        "user": ['name', 'email'],
    }
def iter_content(self, chunk_size=10 * 1024, decode_unicode=None):
    if self._content_consumed:
        raise RuntimeError(
            'The content for this response was already consumed'
        )

    def generate():
        while 1:
            chunk = self.raw.read(chunk_size)
            if not chunk:
                break
            yield chunk
        self._content_consumed = True

    gen = generate()

    if 'gzip' in self.headers.get('content-encoding', ''):
        gen = stream_decode_gzip(gen)

    if decode_unicode is None:
        decode_unicode = self.config.get('decode_unicode')

    if decode_unicode:
        gen = stream_decode_response_unicode(gen, self)

    return gen
Iterates over the response data. This avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place.
def open(self, url):
    protocol, location = self.split(url)
    if protocol == self.protocol:
        return self.find(location)
    else:
        return None
Open a document at the specified url.

@param url: A document URL.
@type url: str
@return: A file pointer to the document.
@rtype: StringIO
def _flip_feature(self, feature, parent_len):
    copy = feature.copy()
    if copy.strand == 0:
        copy.strand = 1
    else:
        copy.strand = 0
    copy.start = parent_len - copy.start
    copy.stop = parent_len - copy.stop
    copy.start, copy.stop = copy.stop, copy.start
    return copy
Adjust a feature's location when flipping DNA.

:param feature: The feature to flip.
:type feature: coral.Feature
:param parent_len: The length of the sequence to which the feature belongs.
:type parent_len: int
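The coordinate arithmetic, sketched on plain integers (positions are made up): a feature spanning [2, 5) on a 10-bp parent lands on [5, 8) of the reverse complement.

parent_len, start, stop = 10, 2, 5
new_start, new_stop = parent_len - stop, parent_len - start  # flip, then swap so start < stop
print(new_start, new_stop)  # 5 8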
def to_list(self):
    ret = OrderedDict()
    for attrname in self.attrs:
        ret[attrname] = self.__getattribute__(attrname)
    return ret
Returns an OrderedDict containing the values of the attributes listed in self.attrs
def ISINSTANCE(instance, A_tuple):
    try:
        instance = instance._redpipe_future_result
    except AttributeError:
        pass

    return isinstance(instance, A_tuple)
Allows you to do isinstance checks on futures. Really, I discourage this
because duck-typing is usually better. But this can provide you with a way
to use isinstance with futures. Works with other objects too.

:param instance:
:param A_tuple:
:return:
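A small sketch of the unwrapping behaviour (FakeFuture is a made-up stand-in for a redpipe future):

class FakeFuture:
    _redpipe_future_result = 42  # hypothetical resolved value

print(ISINSTANCE(FakeFuture(), int))  # True, checks the wrapped result
print(ISINSTANCE('plain', str))       # True, ordinary objects pass through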
def call(self, addr, args, continue_at, cc=None):
    self.inhibit_autoret = True

    if cc is None:
        cc = self.cc

    call_state = self.state.copy()
    ret_addr = self.make_continuation(continue_at)
    saved_local_vars = list(zip(self.local_vars,
                                map(lambda name: getattr(self, name), self.local_vars)))
    simcallstack_entry = (self.state.regs.sp if hasattr(self.state.regs, "sp") else None,
                          self.arguments,
                          saved_local_vars,
                          self.state.regs.lr if self.state.arch.lr_offset is not None else None)
    cc.setup_callsite(call_state, ret_addr, args)
    call_state.callstack.top.procedure_data = simcallstack_entry

    if isinstance(call_state.addr, SootAddressDescriptor):
        pass
    elif call_state.libc.ppc64_abiv == 'ppc64_1':
        call_state.regs.r2 = self.state.mem[addr + 8:].long.resolved
        addr = call_state.mem[addr:].long.resolved
    elif call_state.arch.name in ('MIPS32', 'MIPS64'):
        call_state.regs.t9 = addr

    self._exit_action(call_state, addr)
    self.successors.add_successor(call_state, addr, call_state.solver.true, 'Ijk_Call')

    if o.DO_RET_EMULATION in self.state.options:
        ret_state = self.state.copy()
        cc.setup_callsite(ret_state, ret_addr, args)
        ret_state.callstack.top.procedure_data = simcallstack_entry
        guard = ret_state.solver.true if o.TRUE_RET_EMULATION_GUARD in ret_state.options \
            else ret_state.solver.false
        self.successors.add_successor(ret_state, ret_addr, guard, 'Ijk_FakeRet')
Add an exit representing calling another function via pointer.

:param addr:        The address of the function to call
:param args:        The list of arguments to call the function with
:param continue_at: Later, when the called function returns, execution of
                    the current procedure will continue in the named method.
:param cc:          Optional: use this calling convention for calling the
                    new function. Default is to use the current convention.
def ckw02(handle, begtim, endtim, inst, ref, segid, nrec, start, stop,
          quats, avvs, rates):
    handle = ctypes.c_int(handle)
    begtim = ctypes.c_double(begtim)
    endtim = ctypes.c_double(endtim)
    inst = ctypes.c_int(inst)
    ref = stypes.stringToCharP(ref)
    segid = stypes.stringToCharP(segid)
    start = stypes.toDoubleVector(start)
    stop = stypes.toDoubleVector(stop)
    rates = stypes.toDoubleVector(rates)
    quats = stypes.toDoubleMatrix(quats)
    avvs = stypes.toDoubleMatrix(avvs)
    nrec = ctypes.c_int(nrec)
    libspice.ckw02_c(handle, begtim, endtim, inst, ref, segid, nrec, start,
                     stop, quats, avvs, rates)
Write a type 2 segment to a C-kernel.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw02_c.html

:param handle: Handle of an open CK file.
:type handle: int
:param begtim: The beginning encoded SCLK of the segment.
:type begtim: float
:param endtim: The ending encoded SCLK of the segment.
:type endtim: float
:param inst: The NAIF instrument ID code.
:type inst: int
:param ref: The reference frame of the segment.
:type ref: str
:param segid: Segment identifier.
:type segid: str
:param nrec: Number of pointing records.
:type nrec: int
:param start: Encoded SCLK interval start times.
:type start: Array of floats
:param stop: Encoded SCLK interval stop times.
:type stop: Array of floats
:param quats: Quaternions representing instrument pointing.
:type quats: Nx4-Element Array of floats
:param avvs: Angular velocity vectors.
:type avvs: Nx3-Element Array of floats
:param rates: Number of seconds per tick for each interval.
:type rates: Array of floats
def _register_transaction(self, send_msg, recv_msg, coroutine_recv,
                          coroutine_abrt, get_key=None, inter_msg=None):
    if get_key is None:
        get_key = lambda x: None
    if inter_msg is None:
        inter_msg = []
    self._msgs_registered[send_msg.__msgtype__] = (
        [recv_msg.__msgtype__] + [x.__msgtype__ for x, _ in inter_msg],
        get_key, None, None, [])
    self._msgs_registered[recv_msg.__msgtype__] = (
        [], get_key, coroutine_recv, coroutine_abrt,
        [recv_msg.__msgtype__] + [x.__msgtype__ for x, _ in inter_msg])
    self._transactions[recv_msg.__msgtype__] = {}
    for msg_class, handler in inter_msg:
        self._msgs_registered[msg_class.__msgtype__] = ([], get_key, handler, None, [])
        self._transactions[msg_class.__msgtype__] = {}
Register a type of message to be sent. After this message has been sent, if
the answer is received, callback_recv is called. If the remote server becomes
unavailable, calls callback_abrt.

:param send_msg: class of message to be sent
:param recv_msg: message that the server should send in response
:param get_key: receive a `send_msg` or `recv_msg` as input, and returns the
    "key" (global identifier) of the message
:param coroutine_recv: callback called (on the event loop) when the
    transaction succeeds, with, as input, `recv_msg` and eventually other
    args given to .send
:param coroutine_abrt: callback called (on the event loop) when the
    transaction fails, with, as input, `recv_msg` and eventually other args
    given to .send
:param inter_msg: a list of `(message_class, coroutine_recv)`, that can be
    received during the resolution of the transaction but will not finalize
    it. `get_key` is used on these `message_class` to get the key of the
    transaction.
def email_finder(self, domain=None, company=None, first_name=None,
                 last_name=None, full_name=None, raw=False):
    params = self.base_params

    if not domain and not company:
        raise MissingCompanyError(
            'You must supply at least a domain name or a company name'
        )
    if domain:
        params['domain'] = domain
    elif company:
        params['company'] = company

    if not (first_name and last_name) and not full_name:
        raise MissingNameError(
            'You must supply a first name AND a last name OR a full name'
        )
    if first_name and last_name:
        params['first_name'] = first_name
        params['last_name'] = last_name
    elif full_name:
        params['full_name'] = full_name

    endpoint = self.base_endpoint.format('email-finder')

    res = self._query_hunter(endpoint, params, raw=raw)
    if raw:
        return res

    email = res['email']
    score = res['score']
    return email, score
Find the email address of a person given its name and company's domain.

:param domain: The domain of the company where the person works. Must be
    defined if company is not.
:param company: The name of the company where the person works. Must be
    defined if domain is not.
:param first_name: The first name of the person. Must be defined if
    full_name is not.
:param last_name: The last name of the person. Must be defined if full_name
    is not.
:param full_name: The full name of the person. Must be defined if first_name
    AND last_name are not.
:param raw: Gives back the entire response instead of just email and score.

:return: email and score as a tuple.
def find_single(decl_matcher, decls, recursive=True):
    answer = matcher.find(decl_matcher, decls, recursive)
    if len(answer) == 1:
        return answer[0]
Returns a reference to the declaration that matches the criteria defined by
`decl_matcher`.

If a unique declaration could not be found the method will return None.

:param decl_matcher: Python callable object, that takes one argument -
    reference to a declaration
:param decls: the search scope, :class:declaration_t object or
    :class:declaration_t objects list
:param recursive: boolean, if True, the method will run `decl_matcher` on
    the internal declarations too
def _get_fill(arr: ABCSparseArray) -> np.ndarray:
    try:
        return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
    except ValueError:
        return np.asarray(arr.fill_value)
Create a 0-dim ndarray containing the fill value

Parameters
----------
arr : SparseArray

Returns
-------
fill_value : ndarray
    0-dim ndarray with just the fill value.

Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
def get_groups_dict(self) -> Dict:
    return {
        k: deserializer.inventory.InventoryElement.serialize(v).dict()
        for k, v in self.groups.items()
    }
Returns serialized dictionary of groups from inventory
def mean(name, add, match):
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': True}
    if name not in __reg__:
        __reg__[name] = {}
        __reg__[name]['val'] = 0
        __reg__[name]['total'] = 0
        __reg__[name]['count'] = 0
    for event in __events__:
        try:
            event_data = event['data']['data']
        except KeyError:
            event_data = event['data']
        if salt.utils.stringutils.expr_match(event['tag'], match):
            if add in event_data:
                try:
                    # read the numeric value from the matched field
                    comp = int(event_data[add])
                except ValueError:
                    continue
                __reg__[name]['total'] += comp
                __reg__[name]['count'] += 1
                __reg__[name]['val'] = __reg__[name]['total'] / __reg__[name]['count']
    return ret
Accept a numeric value from the matched events and store a running average
of the values in the given register. If the specified value is not numeric
it will be skipped

USAGE:

.. code-block:: yaml

    foo:
      reg.mean:
        - add: data_field
        - match: my/custom/event
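The register bookkeeping itself is just a running total divided by a count, e.g.:

reg = {'total': 0, 'count': 0, 'val': 0}
for sample in (4, 8, 6):  # hypothetical numeric field values
    reg['total'] += sample
    reg['count'] += 1
    reg['val'] = reg['total'] / reg['count']
print(reg['val'])  # 6.0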
def get_mod_site_name(mod_condition):
    if mod_condition.residue is None:
        mod_str = abbrevs[mod_condition.mod_type]
    else:
        mod_str = mod_condition.residue
    mod_pos = mod_condition.position if \
        mod_condition.position is not None else ''
    name = ('%s%s' % (mod_str, mod_pos))
    return name
Return site names for a modification.
def file_match_any(self, filename):
    if filename.startswith('.' + os.sep):
        filename = filename[len(os.sep) + 1:]
    if os.sep != '/':
        filename = filename.replace(os.sep, '/')
    for selector in self.file_selectors:
        if (selector.pattern.endswith('/') and
                filename.startswith(selector.pattern)):
            return True
        if fnmatch.fnmatch(filename, selector.pattern):
            return True
    return False
Return True if the filename matches any of the configured file selectors.
def fromProfile(cls, profile):
    session = bones.SessionAPI.fromProfile(profile)
    return cls(session)
Return an `Origin` from a given configuration profile. :see: `ProfileStore`.
def as_variable(obj, name=None) -> 'Union[Variable, IndexVariable]':
    from .dataarray import DataArray

    if isinstance(obj, DataArray):
        obj = obj.variable

    if isinstance(obj, Variable):
        obj = obj.copy(deep=False)
    elif isinstance(obj, tuple):
        try:
            obj = Variable(*obj)
        except (TypeError, ValueError) as error:
            raise error.__class__('Could not convert tuple of form '
                                  '(dims, data[, attrs, encoding]): '
                                  '{} to Variable.'.format(obj))
    elif utils.is_scalar(obj):
        obj = Variable([], obj)
    elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
        obj = Variable(obj.name, obj)
    elif isinstance(obj, (set, dict)):
        raise TypeError(
            "variable %r has invalid type %r" % (name, type(obj)))
    elif name is not None:
        data = as_compatible_data(obj)
        if data.ndim != 1:
            raise MissingDimensionsError(
                'cannot set variable %r with %r-dimensional data '
                'without explicit dimension names. Pass a tuple of '
                '(dims, data) instead.' % (name, data.ndim))
        obj = Variable(name, data, fastpath=True)
    else:
        raise TypeError('unable to convert object into a variable without an '
                        'explicit list of dimensions: %r' % obj)

    if name is not None and name in obj.dims:
        if obj.ndim != 1:
            raise MissingDimensionsError(
                '%r has more than 1-dimension and the same name as one of its '
                'dimensions %r. xarray disallows such variables because they '
                'conflict with the coordinates used to label '
                'dimensions.' % (name, obj.dims))
        obj = obj.to_index_variable()

    return obj
Convert an object into a Variable.

Parameters
----------
obj : object
    Object to convert into a Variable.

    - If the object is already a Variable, return a shallow copy.
    - Otherwise, if the object has 'dims' and 'data' attributes, convert
      it into a new Variable.
    - If all else fails, attempt to convert the object into a Variable by
      unpacking it into the arguments for creating a new Variable.
name : str, optional
    If provided:

    - `obj` can be a 1D array, which is assumed to label coordinate values
      along a dimension of this given name.
    - Variables with name matching one of their dimensions are converted
      into `IndexVariable` objects.

Returns
-------
var : Variable
    The newly created variable.
def extract_response(raw_response):
    data = urlread(raw_response)
    if is_success_response(raw_response):
        return data
    elif is_failure_response(raw_response):
        raise RemoteExecuteError(data)
    elif is_invalid_response(raw_response):
        raise InvalidResponseError(data)
    else:
        raise UnknownStatusError(data)
Extract a requests response object. Only extracts those with status_code in
[200, 300).

:param raw_response: a requests.Response object.
:return: content of response.
def channels_archive(self, room_id, **kwargs):
    return self.__call_api_post('channels.archive', roomId=room_id, kwargs=kwargs)
Archives a channel.
def __getIp6Address(self, addressType):
    addrType = ['link local', 'global', 'rloc', 'mesh EID']
    addrs = []
    globalAddr = []
    linkLocal64Addr = ''
    rlocAddr = ''
    meshEIDAddr = ''

    addrs = self.__sendCommand('ipaddr')

    for ip6Addr in addrs:
        if ip6Addr == 'Done':
            break

        ip6AddrPrefix = ip6Addr.split(':')[0]
        if ip6AddrPrefix == 'fe80':
            if ip6Addr.split(':')[4] != '0':
                linkLocal64Addr = ip6Addr
        elif ip6Addr.startswith(self.meshLocalPrefix):
            if ip6Addr.split(':')[4] == '0':
                rlocAddr = ip6Addr
            else:
                meshEIDAddr = ip6Addr
        else:
            if ip6Addr != None:
                globalAddr.append(ip6Addr)
            else:
                pass

    if addressType == addrType[0]:
        return linkLocal64Addr
    elif addressType == addrType[1]:
        return globalAddr
    elif addressType == addrType[2]:
        return rlocAddr
    elif addressType == addrType[3]:
        return meshEIDAddr
    else:
        pass
get specific type of IPv6 address configured on thread device

Args:
    addressType: the specific type of IPv6 address

        link local: link local unicast IPv6 address that's within one-hop scope
        global: global unicast IPv6 address
        rloc: mesh local unicast IPv6 address for routing in thread network
        mesh EID: mesh Endpoint Identifier

Returns:
    IPv6 address string
def _decode_unicode(value, charset, errors):
    fallback = None
    if errors.startswith('fallback:'):
        fallback = errors[9:]
        errors = 'strict'
    try:
        return value.decode(charset, errors)
    except UnicodeError, e:
        if fallback is not None:
            return value.decode(fallback, 'replace')
        from werkzeug.exceptions import HTTPUnicodeError
        raise HTTPUnicodeError(str(e))
Like the regular decode function but this one raises an `HTTPUnicodeError` if errors is `strict`.
def createWorkers(self, num_workers):
    for i in range(num_workers):
        self.workers.append(WorkerThread(self._requests_queue))
Add num_workers worker threads to the pool.

``poll_timout`` sets the interval in seconds (int or float) for how often
threads should check whether they are dismissed, while waiting for requests.
def delete(self):
    try:
        self._conn.delete("/ws/DataStream/{}".format(self.get_stream_id()))
    except DeviceCloudHttpException as http_excpeption:
        if http_excpeption.response.status_code == 404:
            raise NoSuchStreamException()
        else:
            raise http_excpeption
Delete this stream from Device Cloud along with its history

This call will return None on success and raise an exception in the event
of an error performing the deletion.

:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected
    http error
:raises devicecloud.streams.NoSuchStreamException: if this stream has
    already been deleted
def _FieldSkipper():
    WIRETYPE_TO_SKIPPER = [
        _SkipVarint,
        _SkipFixed64,
        _SkipLengthDelimited,
        _SkipGroup,
        _EndGroup,
        _SkipFixed32,
        _RaiseInvalidWireType,
        _RaiseInvalidWireType,
    ]

    wiretype_mask = wire_format.TAG_TYPE_MASK

    def SkipField(buffer, pos, end, tag_bytes):
        wire_type = ord(tag_bytes[0:1]) & wiretype_mask
        return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)

    return SkipField
Constructs the SkipField function.
def make_edl(timestamps, name):
    fpses = {}
    out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
    rec_in = 0
    for index, timestamp in enumerate(timestamps):
        if timestamp['file'] not in fpses:
            fpses[timestamp['file']] = get_fps(timestamp['file'])
        fps = fpses[timestamp['file']]
        n = str(index + 1).zfill(4)
        time_in = timestamp['start']
        time_out = timestamp['end']
        duration = time_out - time_in
        rec_out = rec_in + duration
        full_name = 'reel_{}'.format(n)
        filename = timestamp['file']
        out += make_edl_segment(n, time_in, time_out, rec_in, rec_out,
                                full_name, filename, fps=fps)
        rec_in = rec_out
    with open(name, 'w') as outfile:
        outfile.write(out)
Converts an array of ordered timestamps into an EDL string
def finalize(self):
    self.set_title("{} Intercluster Distance Map (via {})".format(
        self.estimator.__class__.__name__, self.embedding.upper()
    ))
    self.ax.set_xticks([0])
    self.ax.set_yticks([0])
    self.ax.set_xticklabels([])
    self.ax.set_yticklabels([])
    self.ax.set_xlabel("PC2")
    self.ax.set_ylabel("PC1")
    if self.legend:
        self._make_size_legend()
    return self.ax
Finalize the visualization to create an "origin grid" feel instead of the default matplotlib feel. Set the title, remove spines, and label the grid with components. This function also adds a legend from the sizes if required.
def register_callback_duplicate(self, func, serialised=True):
    self.__client.register_callback_duplicate(
        partial(self.__callback_payload_only, func), serialised=serialised)
Register a callback for resource creation but where the resource already
exists in Iotic Space. In this case the existing reference is passed to you.

If `serialised` is not set, the callbacks might arrive in a different order
to they were requested.

The payload passed to your callback is an OrderedDict with the following keys

    #!python
    r    : R_ENTITY, R_FEED, etc  # the type of existing resource
    lid  : <name>                 # the local name of the existing resource
    id   : <GUID>                 # the global Id of the existing resource
    epId : <GUID>                 # the global Id of your agent

`Note` resource types are defined [here](../Core/Const.m.html)

`Example`

    #!python
    def duplicated_callback(args):
        print(args)
    ...
    client.register_callback_created(duplicated_callback)

This would print out something like the following on re-creation of an
R_ENTITY

    #!python
    OrderedDict([(u'lid', u'new_thing1'), (u'r', 1),
                 (u'epId', u'ffd47b75ea786f55c76e337cdc47665a'),
                 (u'id', u'3f11df0a09588a6a1a9732e3837765f8')]))
def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
    worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)
    for key in worksheet_keys:
        title = key.split('/')[1]
        title = utilities.convert_snake_to_title_case(title)
        title = KEY_TO_WORKSHEET_MAP.get(title, title)
        if key == 'property/nod':
            create_property_nod_worksheets(workbook, data_list,
                                           result_info_key, identifier_keys)
        else:
            worksheet = workbook.create_sheet(title=title[:31])
            processed_data = process_data(key, data_list, result_info_key,
                                          identifier_keys)
            write_data(worksheet, processed_data)
    workbook.remove_sheet(workbook.active)
Writes rest of the worksheets to workbook.

Args:
    workbook: workbook to write into
    data_list: Analytics API data as a list of dicts
    result_info_key: the key in api_data dicts that contains the data results
    identifier_keys: the list of keys used as requested identifiers
        (address, zipcode, block_id, etc)
def CallHwclock(logger):
    command = ['/sbin/hwclock', '--hctosys']
    try:
        subprocess.check_call(command)
    except subprocess.CalledProcessError:
        logger.warning('Failed to sync system time with hardware clock.')
    else:
        logger.info('Synced system time with hardware clock.')
Sync clock using hwclock.

Args:
    logger: logger object, used to write to SysLog and serial port.
def zincrby(self, key, increment, member):
    if not isinstance(increment, (int, float)):
        raise TypeError("increment argument must be int or float")
    fut = self.execute(b'ZINCRBY', key, increment, member)
    return wait_convert(fut, int_or_float)
Increment the score of a member in a sorted set.

:raises TypeError: increment is not float or int
def get_area_bbox(self, crs=None):
    bbox_list = [BBox(shape.bounds, crs=self.crs) for shape in self.shape_list]
    area_minx = min([bbox.lower_left[0] for bbox in bbox_list])
    area_miny = min([bbox.lower_left[1] for bbox in bbox_list])
    area_maxx = max([bbox.upper_right[0] for bbox in bbox_list])
    area_maxy = max([bbox.upper_right[1] for bbox in bbox_list])
    bbox = BBox([area_minx, area_miny, area_maxx, area_maxy], crs=self.crs)
    if crs is None:
        return bbox
    return bbox.transform(crs)
Returns a bounding box of the entire area

:param crs: Coordinate reference system in which the bounding box should be
    returned. If None the CRS will be the default CRS of the splitter.
:type crs: CRS or None
:return: A bounding box of the area defined by the `shape_list`
:rtype: BBox
def mergein_marketdata_list(cls, client, option_positions):
    ids = cls._extract_ids(option_positions)
    mds = OptionMarketdata.quotes_by_instrument_ids(client, ids)
    results = []
    for op in option_positions:
        md = [x for x in mds if x['instrument'] == op['option']][0]
        merged_dict = dict(list(op.items()) + list(md.items()))
        results.append(merged_dict)
    return results
Fetch and merge in Marketdata for each option position
def delete_attachment(self, id):
    url = self._get_url('attachment/' + str(id))
    return self._session.delete(url)
Delete attachment by id.

:param id: ID of the attachment to delete
:type id: str
def simple_highlight(img1, img2, opts):
    try:
        diff, ((x1, y1), (x2, y2)) = best_diff(img1, img2, opts)
    except KeyboardInterrupt:
        return None, None
    diff = diff.filter(ImageFilter.MaxFilter(9))
    diff = tweak_diff(diff, opts.opacity)
    mask1 = Image.new('L', img1.size, 0xff)
    mask2 = Image.new('L', img2.size, 0xff)
    mask1.paste(diff, (x1, y1))
    mask2.paste(diff, (x2, y2))
    return mask1, mask2
Try to align the two images to minimize pixel differences.

Produces two masks for img1 and img2.

The algorithm works by comparing every possible alignment of the images,
finding the alignment that minimizes the differences, and then smoothing it
a bit to reduce spurious matches in areas that are perceptibly different
(e.g. text).
def set_setting(self, setting, value, area='1', validate_value=True):
    setting = setting.lower()

    if setting not in CONST.ALL_SETTINGS:
        raise AbodeException(ERROR.INVALID_SETTING, CONST.ALL_SETTINGS)

    if setting in CONST.PANEL_SETTINGS:
        url = CONST.SETTINGS_URL
        data = self._panel_settings(setting, value, validate_value)
    elif setting in CONST.AREA_SETTINGS:
        url = CONST.AREAS_URL
        data = self._area_settings(area, setting, value, validate_value)
    elif setting in CONST.SOUND_SETTINGS:
        url = CONST.SOUNDS_URL
        data = self._sound_settings(area, setting, value, validate_value)
    elif setting in CONST.SIREN_SETTINGS:
        url = CONST.SIREN_URL
        data = self._siren_settings(setting, value, validate_value)

    return self.send_request(method="put", url=url, data=data)
Set an abode system setting to a given value.
def get_month_start_date(self):
    now = timezone.now()
    return timezone.datetime(day=1, month=now.month, year=now.year,
                             tzinfo=now.tzinfo)
Returns the first day of the current month
def gettransactionsurl(idcred, *args, **kwargs):
    getparams = []
    if kwargs:
        try:
            getparams.append("offset=%s" % kwargs["offset"])
        except Exception as ex:
            pass
        try:
            getparams.append("limit=%s" % kwargs["limit"])
        except Exception as ex:
            pass
    url = getmambuurl(*args, **kwargs) + "loans/" + idcred + "/transactions" + \
        ("" if len(getparams) == 0 else "?" + "&".join(getparams))
    return url
Request loan Transactions URL.

If idcred is set, you'll get a response adequate for a MambuTransactions
object. There's a MambuTransaction object too, but you'll get a list first
and each element of it will be automatically converted to a MambuTransaction
object that you may use.

If not set, you'll get a Jar Jar Binks object, or something quite strange
and useless as JarJar. A MambuError most likely, since I haven't needed it
for anything but for transactions of one and just one loan account.

See mambutransaction module and pydoc for further information.

Currently implemented filter parameters:

    * limit
    * offset

See Mambu official developer documentation for further details, and info on
parameters that may be implemented here in the future.
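A sketch of how the query string is assembled (the account id is hypothetical; getmambuurl would normally supply the base URL prefix):

getparams = ['offset=%s' % 10, 'limit=%s' % 100]
tail = 'loans/12345/transactions' + ('' if len(getparams) == 0 else '?' + '&'.join(getparams))
print(tail)  # loans/12345/transactions?offset=10&limit=100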
def house(self):
    house = self.chart.houses.getObjectHouse(self.obj)
    return house
Returns the object's house.
def due(self):
    invoice_charges = Charge.objects.filter(invoice=self)
    invoice_transactions = Transaction.successful.filter(invoice=self)
    return total_amount(invoice_charges) - total_amount(invoice_transactions)
The amount due for this invoice. Takes into account all entities in the invoice. Can be < 0 if the invoice was overpaid.
def put_archive(self, container, path, data):
    params = {'path': path}
    url = self._url('/containers/{0}/archive', container)
    res = self._put(url, params=params, data=data)
    self._raise_for_status(res)
    return res.status_code == 200
Insert a file or folder in an existing container using a tar archive as
source.

Args:
    container (str): The container where the file(s) will be extracted
    path (str): Path inside the container where the file(s) will be
        extracted. Must exist.
    data (bytes): tar data to be extracted

Returns:
    (bool): True if the call succeeds.

Raises:
    :py:class:`docker.errors.APIError`
        If the server returns an error.
def get_graph_data(self, graph, benchmark):
    if benchmark.get('params'):
        param_iter = enumerate(zip(itertools.product(*benchmark['params']),
                                   graph.get_steps()))
    else:
        param_iter = [(None, (None, graph.get_steps()))]

    for j, (param, steps) in param_iter:
        if param is None:
            entry_name = benchmark['name']
        else:
            entry_name = benchmark['name'] + '({0})'.format(', '.join(param))

        start_revision = self._get_start_revision(graph, benchmark, entry_name)
        threshold = self._get_threshold(graph, benchmark, entry_name)

        if start_revision is None:
            continue

        steps = [step for step in steps if step[1] >= start_revision]

        yield j, entry_name, steps, threshold
Iterator over graph data sets

Yields
------
param_idx
    Flat index to parameter permutations for parameterized benchmarks.
    None if benchmark is not parameterized.
entry_name
    Name for the data set. If benchmark is non-parameterized, this is the
    benchmark name.
steps
    Steps to consider in regression detection.
threshold
    User-specified threshold for regression detection.
def post(self, url: StrOrURL, *, data: Any=None,
         **kwargs: Any) -> '_RequestContextManager':
    return _RequestContextManager(
        self._request(hdrs.METH_POST, url, data=data, **kwargs))
Perform HTTP POST request.
def receive(self):
    self._connect()
    result = self.recvConn.receive()
    if self._hasprop(result, "error"):
        raise CommandError(result["error"])
    if self._hasprop(result, "log"):
        self.logs.append(result["log"])
    if self._hasprop(result, "subscription"):
        sub = result["subscription"]
        if not (sub in self.subs):
            self.subs[sub] = []
        self.subs[sub].append(result)
        root = os.path.normpath(os.path.normcase(result["root"]))
        if not root in self.sub_by_root:
            self.sub_by_root[root] = {}
        if not sub in self.sub_by_root[root]:
            self.sub_by_root[root][sub] = []
        self.sub_by_root[root][sub].append(result)
    return result
receive the next PDU from the watchman service If the client has activated subscriptions or logs then this PDU may be a unilateral PDU sent by the service to inform the client of a log event or subscription change. It may also simply be the response portion of a request initiated by query. There are clients in production that subscribe and call this in a loop to retrieve all subscription responses, so care should be taken when making changes here.
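A hedged sketch of the subscribe-and-loop pattern the docstring mentions, using the pywatchman client; the watched root and subscription name are assumptions:

import pywatchman

client = pywatchman.client()

# Watch a directory and subscribe to file changes under it; values are placeholders.
root = "/tmp/project"
client.query("watch-project", root)
client.query("subscribe", root, "my-sub", {"expression": ["allof", ["type", "f"]]})

while True:
    # receive() returns either a unilateral subscription/log PDU
    # or the response portion of an earlier query.
    pdu = client.receive()
    if "subscription" in pdu:
        for f in pdu.get("files", []):
            print("changed:", f)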
def swag_from( specs=None, filetype=None, endpoint=None, methods=None, validation=False, schema_id=None, data=None, definition=None, validation_function=None, validation_error_handler=None): def resolve_path(function, filepath): if not filepath.startswith('/'): if not hasattr(function, 'root_path'): function.root_path = get_root_path(function) res = os.path.join(function.root_path, filepath) return res return filepath def set_from_filepath(function): final_filepath = resolve_path(function, specs) function.swag_type = filetype or specs.split('.')[-1] if endpoint or methods: if not hasattr(function, 'swag_paths'): function.swag_paths = {} if not endpoint and not methods: function.swag_path = final_filepath elif endpoint and methods: for verb in methods: key = "{}_{}".format(endpoint, verb.lower()) function.swag_paths[key] = final_filepath elif endpoint and not methods: function.swag_paths[endpoint] = final_filepath elif methods and not endpoint: for verb in methods: function.swag_paths[verb.lower()] = final_filepath def set_from_specs_dict(function): function.specs_dict = specs def decorator(function): if isinstance(specs, string_types): set_from_filepath(function) swag_path = getattr(function, 'swag_path', None) swag_paths = getattr(function, 'swag_paths', None) validate_args = { 'filepath': swag_path or swag_paths, 'root': getattr(function, 'root_path', None) } if isinstance(specs, dict): set_from_specs_dict(function) validate_args = {'specs': specs} @wraps(function) def wrapper(*args, **kwargs): if validation is True: validate( data, schema_id or definition, validation_function=validation_function, validation_error_handler=validation_error_handler, **validate_args ) return function(*args, **kwargs) return wrapper return decorator
Takes a filename.yml, a dictionary or object and loads swagger specs. :param specs: a filepath, a dictionary or an object :param filetype: yml or yaml (json and py to be implemented) :param endpoint: endpoint to build definition name :param methods: methods to build method-based specs :param validation: perform validation? :param schema_id: Definition id or name to use for validation :param data: data to validate (default is request.json) :param definition: alias to schema_id :param validation_function: custom validation function which takes two positional arguments: the data to be validated first and the schema to validate against second :param validation_error_handler: custom function to handle exceptions thrown when validating, which takes the exception thrown as the first argument, the data being validated as the second and the schema being used to validate as the third
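A hedged usage sketch on a Flask view, assuming a colors.yml spec file sitting next to the module:

from flask import Flask, jsonify
from flasgger import Swagger, swag_from

app = Flask(__name__)
swagger = Swagger(app)


@app.route("/colors/<palette>/")
@swag_from("colors.yml")  # assumed spec file living next to this module
def colors(palette):
    all_palettes = {"cmyk": ["cyan", "magenta", "yellow", "black"]}
    return jsonify({palette: all_palettes.get(palette, [])})


if __name__ == "__main__":
    app.run()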
def get_battery_info(self) -> dict: output, _ = self._execute( '-s', self.device_sn, 'shell', 'dumpsys', 'battery') battery_status = re.split('\n |: ', output[33:].strip()) return dict(zip(battery_status[::2], battery_status[1::2]))
Show device battery information. Returns: A dict. For example: {'AC powered': 'false', 'Charge counter': '0', 'Max charging current': '0', 'Max charging voltage': '0', 'USB powered': 'false', 'Wireless powered': 'false', 'health': '2', 'level': '67', 'present': 'true', 'scale': '100', 'status': '3', 'technology': 'Li-poly', 'temperature': '310', 'voltage': '3965'}
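A self-contained sketch of the same parsing approach applied to a canned payload, so the split/zip step is easier to follow; the sample text is illustrative, not captured from a device:

import re

# Illustrative payload in roughly the shape dumpsys prints (single-space indent
# here is a simplification; real device output may differ slightly).
sample = ("AC powered: false\n"
          " USB powered: true\n"
          " level: 67\n"
          " scale: 100\n"
          " temperature: 310")

# Same idea as get_battery_info: split on "newline + space" and ": "
# so keys and values alternate, then zip them back into a dict.
fields = re.split('\n |: ', sample.strip())
info = dict(zip(fields[::2], fields[1::2]))
print(info["level"], info["temperature"])  # '67' '310'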
def _create_socket(self): log.warning('No certificate check is performed for SSL connections') s = super(SSL, self)._create_socket() return wrap_socket(s)
Creates a new SSL enabled socket and sets its timeout.
def step(self, **kwargs): kwargs.setdefault('linestyle', kwargs.pop('where', 'steps-post')) data = self.append(self.value[-1:], inplace=False) return data.plot(**kwargs)
Create a step plot of this series
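The same 'steps-post' idea in plain matplotlib, as a minimal sketch; note that current matplotlib expresses it through drawstyle rather than linestyle:

import matplotlib.pyplot as plt

x = [0, 1, 2, 3, 4]
y = [1, 3, 2, 5, 4]

# Hold each value until the next sample, which is what a step plot conveys.
plt.plot(x, y, drawstyle="steps-post")
plt.show()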
def content(self, value): value = self._prepend_seperator(value) self._content = value
The main component of the log message. The content field is a freeform field that often begins with the process ID (pid) of the program that created the message.
def find_transitionid_by_name(self, issue, transition_name): transitions_json = self.transitions(issue) id = None for transition in transitions_json: if transition["name"].lower() == transition_name.lower(): id = transition["id"] break return id
Get a transitionid available on the specified issue to the current user. Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference :param issue: ID or key of the issue to get the transitions from :param transition_name: name of the transition we are looking for
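A hedged sketch of looking a transition up by name and then applying it with the same client; the server URL, credentials, issue key and transition name are placeholders:

from jira import JIRA

jira = JIRA(server="https://jira.example.com", basic_auth=("user", "secret"))

issue = jira.issue("PROJ-123")                        # placeholder issue key
tid = jira.find_transitionid_by_name(issue, "Done")   # case-insensitive name match
if tid is not None:
    jira.transition_issue(issue, tid)
else:
    print("No such transition available for this issue")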
def get_tfidf(self, term, document, normalized=False): tf = self.get_term_frequency(term, document) if tf != 0.0: df = 1 + self.get_document_frequency(term) n = 2 + len(self._documents) if normalized: tf /= self.get_document_length(document) return tf * math.log10(n / df) else: return 0.0
Returns the Term-Frequency Inverse-Document-Frequency value for the given term in the specified document. If normalized is True, term frequency will be divided by the document length.
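A self-contained sketch of the same smoothed formula with made-up counts, to make the log term concrete:

import math

# Made-up corpus statistics for one term in one document.
tf = 3          # occurrences of the term in the document
doc_len = 120   # tokens in the document (used only when normalizing)
df = 1 + 4      # 1 + number of documents containing the term
n = 2 + 10      # 2 + number of documents in the collection

tfidf = tf * math.log10(n / df)
tfidf_normalized = (tf / doc_len) * math.log10(n / df)
print(round(tfidf, 4), round(tfidf_normalized, 6))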
def _set_level_depth(self, optobj): has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs def _init_level(rec): if rec.level is None: if rec.parents: rec.level = min(_init_level(rec) for rec in rec.parents) + 1 else: rec.level = 0 return rec.level def _init_depth(rec): if rec.depth is None: if rec.parents: rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1 else: rec.depth = 0 return rec.depth def _init_reldepth(rec): if not hasattr(rec, 'reldepth'): up_terms = rec.get_goterms_upper() if up_terms: rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1 else: rec.reldepth = 0 return rec.reldepth for rec in self.values(): if has_relationship: if rec.depth is None: _init_reldepth(rec) if rec.level is None: _init_level(rec) if rec.depth is None: _init_depth(rec)
Set level, depth and add inverted relationships.
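A minimal sketch of the level/depth distinction on a toy DAG: level is the shortest path to a root (min over parents), depth the longest (max over parents):

# Toy DAG: term -> parents. 'root' has no parents.
parents = {
    "root": [],
    "a": ["root"],
    "b": ["root"],
    "c": ["a", "b"],
    "d": ["c", "root"],
}

def level(term):   # shortest distance to a root
    ps = parents[term]
    return 0 if not ps else 1 + min(level(p) for p in ps)

def depth(term):   # longest distance to a root
    ps = parents[term]
    return 0 if not ps else 1 + max(depth(p) for p in ps)

print(level("d"), depth("d"))  # 1 3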
def process_response(self, request, response): self._set_request_auth_type_metric(request) self._set_request_user_agent_metrics(request) self._set_request_referer_metric(request) self._set_request_user_id_metric(request) return response
Add metrics for various details of the request.
def MatrixSolveLs(a, rhs, l2_reg): r = np.empty(rhs.shape).astype(a.dtype) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0] return r,
Matrix least-squares solve op.
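A small sketch of the same batched least-squares idea with plain NumPy; the shapes are illustrative:

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 5, 3))     # batch of 4 over-determined 5x3 systems
rhs = rng.standard_normal((4, 5, 1))

x = np.empty((4, 3, 1))
for i in range(a.shape[0]):
    # Solve each member of the batch independently, as the op above does.
    x[i] = np.linalg.lstsq(a[i], rhs[i], rcond=None)[0]

print(x.shape)  # (4, 3, 1)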
def route(cls, path): if not path.startswith('/'): raise ValueError('Routes must start with "/"') def wrap(fn): setattr(fn, cls.ROUTE_ATTRIBUTE, path) return fn return wrap
A decorator to indicate that a method should be a routable HTTP endpoint. .. code-block:: python from compactor.process import Process class WebProcess(Process): @Process.route('/hello/world') def hello_world(self, handler): return handler.write('<html><title>hello world</title></html>') The handler passed to the method is a tornado RequestHandler. WARNING: This interface is alpha and may change in the future if or when we remove tornado as a compactor dependency. :param path: The endpoint to route to this method. :type path: ``str``
def run(self, x=None): logger.info('Running ReactiveTransport') if x is None: x = np.zeros(shape=[self.Np, ], dtype=float) self[self.settings['quantity']] = x self._update_physics() x = self._run_reactive(x=x) self[self.settings['quantity']] = x
r""" Builds the A and b matrices, and calls the solver specified in the ``settings`` attribute. Parameters ---------- x : ND-array Initial guess of unknown variable
def read_local_ifaddrs (self): if os.name != 'posix': return [] try: from linkcheck.network import IfConfig except ImportError: return [] ifaddrs = [] ifc = IfConfig() for iface in ifc.getInterfaceList(flags=IfConfig.IFF_UP): addr = ifc.getAddr(iface) if addr: ifaddrs.append(addr) return ifaddrs
IP addresses for all active interfaces. @return: list of IP addresses @rtype: list of strings