diff --git a/piker/ui/_pathops.py b/piker/data/_formatters.py
similarity index 54%
rename from piker/ui/_pathops.py
rename to piker/data/_formatters.py
index 8b3eecaf..3e440fe8 100644
--- a/piker/ui/_pathops.py
+++ b/piker/data/_formatters.py
@@ -14,7 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
"""
-Super fast ``QPainterPath`` generation related operator routines.
+Pre-(path)-graphics formatted x/y nd/1d rendering subsystem.
"""
from __future__ import annotations
@@ -24,44 +24,24 @@ from typing import (
)
import msgspec
+from msgspec import field
import numpy as np
from numpy.lib import recfunctions as rfn
-from numba import njit, float64, int64 # , optional
-from msgspec import field
-# import pyqtgraph as pg
-# from PyQt5 import QtGui
-# from PyQt5.QtCore import QLineF, QPointF
-from ..data._sharedmem import (
+from ._sharedmem import (
ShmArray,
)
-# from .._profile import pg_profile_enabled, ms_slower_then
-from ._compression import (
- ds_m4,
+from ._pathops import (
+ path_arrays_from_ohlc,
)
if TYPE_CHECKING:
- from ._flows import (
- Renderer,
- Flow,
+ from ._dataviz import (
+ Viz,
)
from .._profile import Profiler
-def by_index_and_key(
- renderer: Renderer,
- array: np.ndarray,
- array_key: str,
- vr: tuple[int, int],
-
-) -> tuple[
- np.ndarray,
- np.ndarray,
- np.ndarray,
-]:
- return array['index'], array[array_key], 'all'
-
-
class IncrementalFormatter(msgspec.Struct):
'''
Incrementally updating, pre-path-graphics tracking, formatter.
@@ -73,31 +53,71 @@ class IncrementalFormatter(msgspec.Struct):
'''
shm: ShmArray
- flow: Flow
-
- # last read from shm (usually due to an update call)
- _last_read: tuple[
- int,
- int,
- np.ndarray
-
- ]
+ viz: Viz
@property
- def last_read(self) -> tuple | None:
- return self._last_read
+ def index_field(self) -> 'str':
+ '''
+ Value (``str``) used to look up the "index series" from the
+ underlying source ``numpy`` struct-array; delegate directly to
+ the managing ``Viz``.
+
+ '''
+ return self.viz.index_field
+
+ # Incrementally updated xy ndarray formatted data, a pre-1d
+ # format which is updated and cached independently of the final
+ # pre-graphics-path 1d format.
+ x_nd: Optional[np.ndarray] = None
+ y_nd: Optional[np.ndarray] = None
+
+ @property
+ def xy_nd(self) -> tuple[np.ndarray, np.ndarray]:
+ return (
+ self.x_nd[self.xy_slice],
+ self.y_nd[self.xy_slice],
+ )
+
+ @property
+ def xy_slice(self) -> slice:
+ return slice(
+ self.xy_nd_start,
+ self.xy_nd_stop,
+ )
+
+ # indexes which slice into the above arrays (which are allocated
+ # based on source data shm input size) and allow retrieving
+ # incrementally updated data.
+ xy_nd_start: int | None = None
+ xy_nd_stop: int | None = None
+
+ # TODO: eventually incrementally update 1d-pre-graphics path data?
+ # x_1d: Optional[np.ndarray] = None
+ # y_1d: Optional[np.ndarray] = None
+
+ # incremental view-change state(s) tracking
+ _last_vr: tuple[float, float] | None = None
+ _last_ivdr: tuple[float, float] | None = None
+
+ @property
+ def index_step_size(self) -> float:
+ '''
+ Readonly value computed on first ``.diff()`` call.
+
+ '''
+ return self.viz.index_step()
def __repr__(self) -> str:
msg = (
f'{type(self)}: ->\n\n'
- f'fqsn={self.flow.name}\n'
+ f'fqsn={self.viz.name}\n'
f'shm_name={self.shm.token["shm_name"]}\n\n'
f'last_vr={self._last_vr}\n'
f'last_ivdr={self._last_ivdr}\n\n'
- f'xy_nd_start={self.xy_nd_start}\n'
- f'xy_nd_stop={self.xy_nd_stop}\n\n'
+ f'xy_slice={self.xy_slice}\n'
+ # f'xy_nd_stop={self.xy_nd_stop}\n\n'
)
x_nd_len = 0
@@ -121,17 +141,13 @@ class IncrementalFormatter(msgspec.Struct):
np.ndarray,
np.ndarray,
]:
- (
- last_xfirst,
- last_xlast,
- last_array,
- last_ivl,
- last_ivr,
- last_in_view,
- ) = self.last_read
-
- # TODO: can the renderer just call ``Flow.read()`` directly?
- # unpack latest source data read
+ # TODO:
+ # - can the renderer just call ``Viz.read()`` directly? unpack
+ # latest source data read
+ # - eventually maybe we can implement some kind of
+ # transform on the ``QPainterPath`` that will more or less
+ # detect the diff in "elements" terms? update diff state since
+ # we've now rendered paths.
(
xfirst,
xlast,
@@ -141,41 +157,46 @@ class IncrementalFormatter(msgspec.Struct):
in_view,
) = new_read
+ index = array['index']
+
+ # if the first index in the read array is 0 then
+    # it means the source buffer has been completely backfilled to
+ # available space.
+ src_start = index[0]
+ src_stop = index[-1] + 1
+
+ # these are the "formatted output data" indices
+ # for the pre-graphics arrays.
+ nd_start = self.xy_nd_start
+ nd_stop = self.xy_nd_stop
+
+ if (
+ nd_start is None
+ ):
+ assert nd_stop is None
+
+ # setup to do a prepend of all existing src history
+ nd_start = self.xy_nd_start = src_stop
+ # set us in a zero-to-append state
+ nd_stop = self.xy_nd_stop = src_stop
+
+ align_index = array[self.index_field]
+
# compute the length diffs between the first/last index entry in
# the input data and the last indexes we have on record from the
# last time we updated the curve index.
- prepend_length = int(last_xfirst - xfirst)
- append_length = int(xlast - last_xlast)
+ prepend_length = int(nd_start - src_start)
+ append_length = int(src_stop - nd_stop)
# blah blah blah
# do diffing for prepend, append and last entry
return (
- slice(xfirst, last_xfirst),
+ slice(src_start, nd_start),
prepend_length,
append_length,
- slice(last_xlast, xlast),
+ slice(nd_stop, src_stop),
)
- # Incrementally updated xy ndarray formatted data, a pre-1d
- # format which is updated and cached independently of the final
- # pre-graphics-path 1d format.
- x_nd: Optional[np.ndarray] = None
- y_nd: Optional[np.ndarray] = None
-
- # indexes which slice into the above arrays (which are allocated
- # based on source data shm input size) and allow retrieving
- # incrementally updated data.
- xy_nd_start: int = 0
- xy_nd_stop: int = 0
-
- # TODO: eventually incrementally update 1d-pre-graphics path data?
- # x_1d: Optional[np.ndarray] = None
- # y_1d: Optional[np.ndarray] = None
-
- # incremental view-change state(s) tracking
- _last_vr: tuple[float, float] | None = None
- _last_ivdr: tuple[float, float] | None = None
-
def _track_inview_range(
self,
view_range: tuple[int, int],
@@ -224,8 +245,6 @@ class IncrementalFormatter(msgspec.Struct):
array_key: str,
profiler: Profiler,
- slice_to_head: int = -1,
- read_src_from_key: bool = True,
slice_to_inview: bool = True,
) -> tuple[
@@ -251,99 +270,78 @@ class IncrementalFormatter(msgspec.Struct):
post_slice,
) = self.diff(new_read)
+ # we first need to allocate xy data arrays
+ # from the source data.
if self.y_nd is None:
- # we first need to allocate xy data arrays
- # from the source data.
+ self.xy_nd_start = shm._first.value
+ self.xy_nd_stop = shm._last.value
self.x_nd, self.y_nd = self.allocate_xy_nd(
shm,
array_key,
)
- self.xy_nd_start = shm._first.value
- self.xy_nd_stop = shm._last.value
profiler('allocated xy history')
- if prepend_len:
- y_prepend = shm._array[pre_slice]
- if read_src_from_key:
- y_prepend = y_prepend[array_key]
+ # once allocated we do incremental pre/append
+ # updates from the diff with the source buffer.
+ else:
+ if prepend_len:
- (
- new_y_nd,
- y_nd_slc,
+ self.incr_update_xy_nd(
+ shm,
+ array_key,
- ) = self.incr_update_xy_nd(
- shm,
- array_key,
+ # this is the pre-sliced, "normally expected"
+ # new data that an updater would normally be
+ # expected to process, however in some cases (like
+ # step curves) the updater routine may want to do
+ # the source history-data reading itself, so we pass
+ # both here.
+ shm._array[pre_slice],
+ pre_slice,
+ prepend_len,
- # this is the pre-sliced, "normally expected"
- # new data that an updater would normally be
- # expected to process, however in some cases (like
- # step curves) the updater routine may want to do
- # the source history-data reading itself, so we pass
- # both here.
- y_prepend,
- pre_slice,
- prepend_len,
+ self.xy_nd_start,
+ self.xy_nd_stop,
+ is_append=False,
+ )
- self.xy_nd_start,
- self.xy_nd_stop,
- is_append=False,
- )
+ self.xy_nd_start -= prepend_len
+            profiler(f'prepended xy history: {prepend_len}')
- # y_nd_view = self.y_nd[y_nd_slc]
- self.y_nd[y_nd_slc] = new_y_nd
- # if read_src_from_key:
- # y_nd_view[:][array_key] = new_y_nd
- # else:
- # y_nd_view[:] = new_y_nd
+ if append_len:
+ self.incr_update_xy_nd(
+ shm,
+ array_key,
- self.xy_nd_start = shm._first.value
- profiler('prepended xy history: {prepend_length}')
+ shm._array[post_slice],
+ post_slice,
+ append_len,
- if append_len:
- y_append = shm._array[post_slice]
- if read_src_from_key:
- y_append = y_append[array_key]
-
- (
- new_y_nd,
- y_nd_slc,
-
- ) = self.incr_update_xy_nd(
- shm,
- array_key,
-
- y_append,
- post_slice,
- append_len,
-
- self.xy_nd_start,
- self.xy_nd_stop,
- is_append=True,
- )
- # self.y_nd[post_slice] = new_y_nd
- # self.y_nd[xy_slice or post_slice] = xy_data
- self.y_nd[y_nd_slc] = new_y_nd
- # if read_src_from_key:
- # y_nd_view[:][array_key] = new_y_nd
- # else:
- # y_nd_view[:] = new_y_nd
-
- self.xy_nd_stop = shm._last.value
- profiler('appened xy history: {append_length}')
+ self.xy_nd_start,
+ self.xy_nd_stop,
+ is_append=True,
+ )
+ self.xy_nd_stop += append_len
+            profiler(f'appended xy history: {append_len}')
+ # sanity
+ # slice_ln = post_slice.stop - post_slice.start
+ # assert append_len == slice_ln
view_changed: bool = False
view_range: tuple[int, int] = (ivl, ivr)
if slice_to_inview:
view_changed = self._track_inview_range(view_range)
array = in_view
- profiler(f'{self.flow.name} view range slice {view_range}')
+ profiler(f'{self.viz.name} view range slice {view_range}')
- hist = array[:slice_to_head]
+ # hist = array[:slice_to_head]
+
+ # XXX: WOA WTF TRACTOR DEBUGGING BUGGG
+ # assert 0
# xy-path data transform: convert source data to a format
# able to be passed to a `QPainterPath` rendering routine.
- if not len(hist):
+ if not len(array):
# XXX: this might be why the profiler only has exits?
return
@@ -351,7 +349,7 @@ class IncrementalFormatter(msgspec.Struct):
# x/y_data in the case where allocate_xy is
# defined?
x_1d, y_1d, connect = self.format_xy_nd_to_1d(
- hist,
+ array,
array_key,
view_range,
)
@@ -370,22 +368,22 @@ class IncrementalFormatter(msgspec.Struct):
# # assert (len(appended) - 1) == append_len
# # assert len(appended) == append_len
# print(
- # f'{self.flow.name} APPEND LEN: {append_len}\n'
- # f'{self.flow.name} APPENDED: {appended}\n'
- # f'{self.flow.name} app_tres: {app_tres}\n'
+ # f'{self.viz.name} APPEND LEN: {append_len}\n'
+ # f'{self.viz.name} APPENDED: {appended}\n'
+ # f'{self.viz.name} app_tres: {app_tres}\n'
# )
# update the last "in view data range"
if len(x_1d):
- self._last_ivdr = x_1d[0], x_1d[slice_to_head]
-
- # TODO: eventually maybe we can implement some kind of
- # transform on the ``QPainterPath`` that will more or less
- # detect the diff in "elements" terms?
- # update diff state since we've now rendered paths.
- self._last_read = new_read
+ self._last_ivdr = x_1d[0], x_1d[-1]
+ if (
+ self.index_field == 'time'
+ and (x_1d[-1] == 0.5).any()
+ ):
+ breakpoint()
profiler('.format_to_1d()')
+
return (
x_1d,
y_1d,
@@ -400,6 +398,8 @@ class IncrementalFormatter(msgspec.Struct):
# Sub-type override interface #
###############################
+ x_offset: np.ndarray = np.array([0])
+
# optional pre-graphics xy formatted data which
# is incrementally updated in sync with the source data.
# XXX: was ``.allocate_xy()``
@@ -407,7 +407,6 @@ class IncrementalFormatter(msgspec.Struct):
self,
src_shm: ShmArray,
data_field: str,
- index_field: str = 'index',
) -> tuple[
np.ndarray, # x
@@ -421,7 +420,11 @@ class IncrementalFormatter(msgspec.Struct):
'''
y_nd = src_shm._array[data_field].copy()
- x_nd = src_shm._array[index_field].copy()
+ x_nd = (
+ src_shm._array[self.index_field].copy()
+ +
+ self.x_offset
+ )
return x_nd, y_nd
# XXX: was ``.update_xy()``
@@ -440,23 +443,43 @@ class IncrementalFormatter(msgspec.Struct):
nd_stop: int,
is_append: bool,
- index_field: str = 'index',
- ) -> tuple[
- np.ndarray,
- slice,
- ]:
+ ) -> None:
# write pushed data to flattened copy
- new_y_nd = new_from_src
+ y_nd_new = new_from_src[data_field]
+ self.y_nd[read_slc] = y_nd_new
- # XXX
- # TODO: this should be returned and written by caller!
- # XXX
- # generate same-valued-per-row x support based on y shape
- if index_field != 'index':
- self.x_nd[read_slc, :] = new_from_src[index_field]
+ x_nd_new = self.x_nd[read_slc]
+ x_nd_new[:] = (
+ new_from_src[self.index_field]
+ +
+ self.x_offset
+ )
- return new_y_nd, read_slc
+ # x_nd = self.x_nd[self.xy_slice]
+ # y_nd = self.y_nd[self.xy_slice]
+ # name = self.viz.name
+ # if 'trade_rate' == name:
+ # s = 4
+ # print(
+ # f'{name.upper()}:\n'
+ # 'NEW_FROM_SRC:\n'
+ # f'new_from_src: {new_from_src}\n\n'
+
+ # f'PRE self.x_nd:'
+ # f'\n{list(x_nd[-s:])}\n'
+
+ # f'PRE self.y_nd:\n'
+ # f'{list(y_nd[-s:])}\n\n'
+
+ # f'TO WRITE:\n'
+
+ # f'x_nd_new:\n'
+ # f'{x_nd_new[0]}\n'
+
+ # f'y_nd_new:\n'
+ # f'{y_nd_new}\n'
+ # )
# XXX: was ``.format_xy()``
def format_xy_nd_to_1d(
@@ -477,9 +500,20 @@ class IncrementalFormatter(msgspec.Struct):
Return single field column data verbatim
'''
+ # NOTE: we don't include the very last datum which is filled in
+ # normally by another graphics object.
+ x_1d = array[self.index_field][:-1]
+ if (
+ self.index_field == 'time'
+ and x_1d.any()
+ and (x_1d[-1] == 0.5).any()
+ ):
+ breakpoint()
+
+ y_1d = array[array_key][:-1]
return (
- array['index'],
- array[array_key],
+ x_1d,
+ y_1d,
# 1d connection array or style-key to
# ``pg.functions.arrayToQPath()``
@@ -488,8 +522,16 @@ class IncrementalFormatter(msgspec.Struct):
class OHLCBarsFmtr(IncrementalFormatter):
+ x_offset: np.ndarray = np.array([
+ -0.5,
+ 0,
+ 0,
+ 0.5,
+ ])
- fields: list[str] = field(default_factory=lambda: ['open', 'high', 'low', 'close'])
+ fields: list[str] = field(
+ default_factory=lambda: ['open', 'high', 'low', 'close']
+ )
def allocate_xy_nd(
self,
@@ -512,13 +554,15 @@ class OHLCBarsFmtr(IncrementalFormatter):
# generate an flat-interpolated x-domain
x_nd = (
np.broadcast_to(
- ohlc_shm._array['index'][:, None],
+ ohlc_shm._array[self.index_field][:, None],
(
ohlc_shm._array.size,
# 4, # only ohlc
y_nd.shape[1],
),
- ) + np.array([-0.5, 0, 0, 0.5])
+ )
+ +
+ self.x_offset
)
assert y_nd.any()
@@ -528,112 +572,6 @@ class OHLCBarsFmtr(IncrementalFormatter):
y_nd,
)
- @staticmethod
- @njit(
- # TODO: for now need to construct this manually for readonly
- # arrays, see https://github.com/numba/numba/issues/4511
- # ntypes.tuple((float64[:], float64[:], float64[:]))(
- # numba_ohlc_dtype[::1], # contiguous
- # int64,
- # optional(float64),
- # ),
- nogil=True
- )
- def path_arrays_from_ohlc(
- data: np.ndarray,
- start: int64,
- bar_gap: float64 = 0.43,
-
- ) -> tuple[
- np.ndarray,
- np.ndarray,
- np.ndarray,
- ]:
- '''
- Generate an array of lines objects from input ohlc data.
-
- '''
- size = int(data.shape[0] * 6)
-
- x = np.zeros(
- # data,
- shape=size,
- dtype=float64,
- )
- y, c = x.copy(), x.copy()
-
- # TODO: report bug for assert @
- # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
- for i, q in enumerate(data[start:], start):
-
- # TODO: ask numba why this doesn't work..
- # open, high, low, close, index = q[
- # ['open', 'high', 'low', 'close', 'index']]
-
- open = q['open']
- high = q['high']
- low = q['low']
- close = q['close']
- index = float64(q['index'])
-
- istart = i * 6
- istop = istart + 6
-
- # x,y detail the 6 points which connect all vertexes of a ohlc bar
- x[istart:istop] = (
- index - bar_gap,
- index,
- index,
- index,
- index,
- index + bar_gap,
- )
- y[istart:istop] = (
- open,
- open,
- low,
- high,
- close,
- close,
- )
-
- # specifies that the first edge is never connected to the
- # prior bars last edge thus providing a small "gap"/"space"
- # between bars determined by ``bar_gap``.
- c[istart:istop] = (1, 1, 1, 1, 1, 0)
-
- return x, y, c
-
- # TODO: can we drop this frame and just use the above?
- def format_xy_nd_to_1d(
- self,
-
- array: np.ndarray,
- array_key: str,
- vr: tuple[int, int],
-
- start: int = 0, # XXX: do we need this?
- # 0.5 is no overlap between arms, 1.0 is full overlap
- w: float = 0.43,
-
- ) -> tuple[
- np.ndarray,
- np.ndarray,
- np.ndarray,
- ]:
- '''
- More or less direct proxy to the ``numba``-fied
- ``path_arrays_from_ohlc()`` (above) but with closed in kwargs
- for line spacing.
-
- '''
- x, y, c = self.path_arrays_from_ohlc(
- array,
- start,
- bar_gap=w,
- )
- return x, y, c
-
def incr_update_xy_nd(
self,
@@ -649,26 +587,55 @@ class OHLCBarsFmtr(IncrementalFormatter):
nd_stop: int,
is_append: bool,
- index_field: str = 'index',
- ) -> tuple[
- np.ndarray,
- slice,
- ]:
+ ) -> None:
# write newly pushed data to flattened copy
# a struct-arr is always passed in.
new_y_nd = rfn.structured_to_unstructured(
new_from_src[self.fields]
)
+ self.y_nd[read_slc] = new_y_nd
- # XXX
- # TODO: this should be returned and written by caller!
- # XXX
# generate same-valued-per-row x support based on y shape
- if index_field != 'index':
- self.x_nd[read_slc, :] = new_from_src[index_field]
+ x_nd_new = self.x_nd[read_slc]
+ x_nd_new[:] = np.broadcast_to(
+ new_from_src[self.index_field][:, None],
+ new_y_nd.shape,
+ ) + self.x_offset
- return new_y_nd, read_slc
+ # TODO: can we drop this frame and just use the above?
+ def format_xy_nd_to_1d(
+ self,
+
+ array: np.ndarray,
+ array_key: str,
+ vr: tuple[int, int],
+
+ start: int = 0, # XXX: do we need this?
+ # 0.5 is no overlap between arms, 1.0 is full overlap
+ w: float = 0.16,
+
+ ) -> tuple[
+ np.ndarray,
+ np.ndarray,
+ np.ndarray,
+ ]:
+ '''
+ More or less direct proxy to the ``numba``-fied
+ ``path_arrays_from_ohlc()`` (above) but with closed in kwargs
+ for line spacing.
+
+ '''
+ x, y, c = path_arrays_from_ohlc(
+ array,
+ start,
+ bar_w=self.index_step_size,
+ bar_gap=w * self.index_step_size,
+
+ # XXX: don't ask, due to a ``numba`` bug..
+ use_time_index=(self.index_field == 'time'),
+ )
+ return x, y, c
class OHLCBarsAsCurveFmtr(OHLCBarsFmtr):
@@ -689,8 +656,8 @@ class OHLCBarsAsCurveFmtr(OHLCBarsFmtr):
# should we be passing in array as an xy arrays tuple?
# 2 more datum-indexes to capture zero at end
- x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop]
- y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop]
+ x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop-1]
+ y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop-1]
# slice to view
ivl, ivr = vr
@@ -706,14 +673,17 @@ class OHLCBarsAsCurveFmtr(OHLCBarsFmtr):
class StepCurveFmtr(IncrementalFormatter):
+ x_offset: np.ndarray = np.array([
+ 0,
+ 1,
+ ])
+
def allocate_xy_nd(
self,
shm: ShmArray,
data_field: str,
- index_field: str = 'index',
-
) -> tuple[
np.ndarray, # x
np.nd.array # y
@@ -723,19 +693,30 @@ class StepCurveFmtr(IncrementalFormatter):
for use by path graphics generation.
'''
- i = shm._array['index'].copy()
+ i = shm._array[self.index_field].copy()
out = shm._array[data_field].copy()
- x_out = np.broadcast_to(
- i[:, None],
- (i.size, 2),
- ) + np.array([-0.5, 0.5])
+ x_out = (
+ np.broadcast_to(
+ i[:, None],
+ (i.size, 2),
+ )
+ +
+ self.x_offset
+ )
- y_out = np.empty((len(out), 2), dtype=out.dtype)
+ # fill out Nx2 array to hold each step's left + right vertices.
+ y_out = np.empty(
+ x_out.shape,
+ dtype=out.dtype,
+ )
+ # fill in (current) values from source shm buffer
y_out[:] = out[:, np.newaxis]
+ # TODO: pretty sure we can drop this?
# start y at origin level
- y_out[0, 0] = 0
+ # y_out[0, 0] = 0
+ # y_out[self.xy_nd_start] = 0
return x_out, y_out
def incr_update_xy_nd(
@@ -744,12 +725,12 @@ class StepCurveFmtr(IncrementalFormatter):
src_shm: ShmArray,
array_key: str,
- src_update: np.ndarray, # portion of source that was updated
- slc: slice,
+ new_from_src: np.ndarray, # portion of source that was updated
+ read_slc: slice,
ln: int, # len of updated
- first: int,
- last: int,
+ nd_start: int,
+ nd_stop: int,
is_append: bool,
@@ -757,25 +738,62 @@ class StepCurveFmtr(IncrementalFormatter):
np.ndarray,
slice,
]:
- # for a step curve we slice from one datum prior
+ # NOTE: for a step curve we slice from one datum prior
# to the current "update slice" to get the previous
# "level".
- if is_append:
- start = max(last - 1, 0)
- end = src_shm._last.value
- new_y = src_shm._array[start:end][array_key]
- slc = slice(start, end)
-
- else:
- new_y = src_update
-
- return (
- np.broadcast_to(
- new_y[:, None], (new_y.size, 2),
- ),
- slc,
+ #
+ # why this is needed,
+ # - the current new append slice will often have a zero
+ # value in the latest datum-step (at least for zero-on-new
+ # cases like vlm in the) as per configuration of the FSP
+ # engine.
+ # - we need to look back a datum to get the last level which
+ # will be used to terminate/complete the last step x-width
+ # which will be set to pair with the last x-index THIS MEANS
+ #
+ # XXX: this means WE CAN'T USE the append slice since we need to
+ # "look backward" one step to get the needed back-to-zero level
+ # and the update data in ``new_from_src`` will only contain the
+ # latest new data.
+ back_1 = slice(
+ read_slc.start - 1,
+ read_slc.stop,
)
+ to_write = src_shm._array[back_1]
+ y_nd_new = self.y_nd[back_1]
+ y_nd_new[:] = to_write[array_key][:, None]
+
+ x_nd_new = self.x_nd[read_slc]
+ x_nd_new[:] = (
+ new_from_src[self.index_field][:, None]
+ +
+ self.x_offset
+ )
+
+ # XXX: uncomment for debugging
+ # x_nd = self.x_nd[self.xy_slice]
+ # y_nd = self.y_nd[self.xy_slice]
+ # name = self.viz.name
+ # if 'dolla_vlm' in name:
+ # s = 4
+ # print(
+ # f'{name}:\n'
+ # 'NEW_FROM_SRC:\n'
+ # f'new_from_src: {new_from_src}\n\n'
+
+ # f'PRE self.x_nd:'
+ # f'\n{x_nd[-s:]}\n'
+ # f'PRE self.y_nd:\n'
+ # f'{y_nd[-s:]}\n\n'
+
+ # f'TO WRITE:\n'
+ # f'x_nd_new:\n'
+ # f'{x_nd_new}\n'
+ # f'y_nd_new:\n'
+ # f'{y_nd_new}\n'
+ # )
+
def format_xy_nd_to_1d(
self,
@@ -788,65 +806,41 @@ class StepCurveFmtr(IncrementalFormatter):
np.ndarray,
str,
]:
- lasts = array[['index', array_key]]
- last = lasts[array_key][-1]
+ last_t, last = array[-1][[self.index_field, array_key]]
- # 2 more datum-indexes to capture zero at end
- x_step = self.x_nd[self.xy_nd_start:self.xy_nd_stop+2]
- y_step = self.y_nd[self.xy_nd_start:self.xy_nd_stop+2]
- y_step[-1] = last
+ start = self.xy_nd_start
+ stop = self.xy_nd_stop
+
+ x_step = self.x_nd[start:stop]
+ y_step = self.y_nd[start:stop]
# slice out in-view data
ivl, ivr = vr
- ys_iv = y_step[ivl:ivr+1]
- xs_iv = x_step[ivl:ivr+1]
+
+ # NOTE: add an extra step to get the vertical-line-down-to-zero
+ # adjacent to the last-datum graphic (filled rect).
+ x_step_iv = x_step[ivl:ivr+1]
+ y_step_iv = y_step[ivl:ivr+1]
# flatten to 1d
- y_iv = ys_iv.reshape(ys_iv.size)
- x_iv = xs_iv.reshape(xs_iv.size)
+ x_1d = x_step_iv.reshape(x_step_iv.size)
+ y_1d = y_step_iv.reshape(y_step_iv.size)
- # print(
- # f'ys_iv : {ys_iv[-s:]}\n'
- # f'y_iv: {y_iv[-s:]}\n'
- # f'xs_iv: {xs_iv[-s:]}\n'
- # f'x_iv: {x_iv[-s:]}\n'
- # )
+ if (
+ self.index_field == 'time'
+ and x_1d.any()
+ and (x_1d == 0.5).any()
+ ):
+ breakpoint()
- return x_iv, y_iv, 'all'
+ # debugging
+ # if y_1d.any():
+ # s = 6
+ # print(
+ # f'x_step_iv:\n{x_step_iv[-s:]}\n'
+ # f'y_step_iv:\n{y_step_iv[-s:]}\n\n'
+ # f'x_1d:\n{x_1d[-s:]}\n'
+ # f'y_1d:\n{y_1d[-s:]}\n'
+ # )
-
-def xy_downsample(
- x,
- y,
- uppx,
-
- x_spacer: float = 0.5,
-
-) -> tuple[
- np.ndarray,
- np.ndarray,
- float,
- float,
-]:
- '''
- Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input
- ``uppx`` (units-per-pixel) and add space between discreet datums.
-
- '''
- # downsample whenever more then 1 pixels per datum can be shown.
- # always refresh data bounds until we get diffing
- # working properly, see above..
- bins, x, y, ymn, ymx = ds_m4(
- x,
- y,
- uppx,
- )
-
- # flatten output to 1d arrays suitable for path-graphics generation.
- x = np.broadcast_to(x[:, None], y.shape)
- x = (x + np.array(
- [-x_spacer, 0, 0, x_spacer]
- )).flatten()
- y = y.flatten()
-
- return x, y, ymn, ymx
+ return x_1d, y_1d, 'all'
diff --git a/piker/ui/_compression.py b/piker/data/_m4.py
similarity index 68%
rename from piker/ui/_compression.py
rename to piker/data/_m4.py
index c66b3e58..f75d3209 100644
--- a/piker/ui/_compression.py
+++ b/piker/data/_m4.py
@@ -15,17 +15,30 @@
# along with this program. If not, see .
'''
-Graphics related downsampling routines for compressing to pixel
-limits on the display device.
+Graphics downsampling using the infamous M4 algorithm.
+
+This is one of ``piker``'s secret weapons allowing us to boss all other
+charting platforms B)
+
+(AND DON'T YOU DARE TAKE THIS CODE WITHOUT CREDIT OR WE'LL SUE UR F#&@* ASS).
+
+NOTES: this method is a so called "visualization driven data
+aggregation" approach. It gives error-free line chart
+downsampling, see
+further scientific paper resources:
+- http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
+- http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
+
+Details on implementation of this algo are based in,
+https://github.com/pikers/piker/issues/109
'''
import math
from typing import Optional
import numpy as np
-from numpy.lib import recfunctions as rfn
from numba import (
- jit,
+ njit,
# float64, optional, int64,
)
@@ -35,109 +48,6 @@ from ..log import get_logger
log = get_logger(__name__)
-def hl2mxmn(ohlc: np.ndarray) -> np.ndarray:
- '''
- Convert a OHLC struct-array containing 'high'/'low' columns
- to a "joined" max/min 1-d array.
-
- '''
- index = ohlc['index']
- hls = ohlc[[
- 'low',
- 'high',
- ]]
-
- mxmn = np.empty(2*hls.size, dtype=np.float64)
- x = np.empty(2*hls.size, dtype=np.float64)
- trace_hl(hls, mxmn, x, index[0])
- x = x + index[0]
-
- return mxmn, x
-
-
-@jit(
- # TODO: the type annots..
- # float64[:](float64[:],),
- nopython=True,
-)
-def trace_hl(
- hl: 'np.ndarray',
- out: np.ndarray,
- x: np.ndarray,
- start: int,
-
- # the "offset" values in the x-domain which
- # place the 2 output points around each ``int``
- # master index.
- margin: float = 0.43,
-
-) -> None:
- '''
- "Trace" the outline of the high-low values of an ohlc sequence
- as a line such that the maximum deviation (aka disperaion) between
- bars if preserved.
-
- This routine is expected to modify input arrays in-place.
-
- '''
- last_l = hl['low'][0]
- last_h = hl['high'][0]
-
- for i in range(hl.size):
- row = hl[i]
- l, h = row['low'], row['high']
-
- up_diff = h - last_l
- down_diff = last_h - l
-
- if up_diff > down_diff:
- out[2*i + 1] = h
- out[2*i] = last_l
- else:
- out[2*i + 1] = l
- out[2*i] = last_h
-
- last_l = l
- last_h = h
-
- x[2*i] = int(i) - margin
- x[2*i + 1] = int(i) + margin
-
- return out
-
-
-def ohlc_flatten(
- ohlc: np.ndarray,
- use_mxmn: bool = True,
-
-) -> tuple[np.ndarray, np.ndarray]:
- '''
- Convert an OHLCV struct-array into a flat ready-for-line-plotting
- 1-d array that is 4 times the size with x-domain values distributed
- evenly (by 0.5 steps) over each index.
-
- '''
- index = ohlc['index']
-
- if use_mxmn:
- # traces a line optimally over highs to lows
- # using numba. NOTE: pretty sure this is faster
- # and looks about the same as the below output.
- flat, x = hl2mxmn(ohlc)
-
- else:
- flat = rfn.structured_to_unstructured(
- ohlc[['open', 'high', 'low', 'close']]
- ).flatten()
-
- x = np.linspace(
- start=index[0] - 0.5,
- stop=index[-1] + 0.5,
- num=len(flat),
- )
- return x, flat
-
-
def ds_m4(
x: np.ndarray,
y: np.ndarray,
@@ -160,16 +70,6 @@ def ds_m4(
This is more or less an OHLC style sampling of a line-style series.
'''
- # NOTE: this method is a so called "visualization driven data
- # aggregation" approach. It gives error-free line chart
- # downsampling, see
- # further scientific paper resources:
- # - http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
- # - http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
-
- # Details on implementation of this algo are based in,
- # https://github.com/pikers/piker/issues/109
-
# XXX: from infinite on downsampling viewable graphics:
# "one thing i remembered about the binning - if you are
# picking a range within your timeseries the start and end bin
@@ -256,8 +156,7 @@ def ds_m4(
return nb, x_out, y_out, ymn, ymx
-@jit(
- nopython=True,
+@njit(
nogil=True,
)
def _m4(
diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py
new file mode 100644
index 00000000..25e2c451
--- /dev/null
+++ b/piker/data/_pathops.py
@@ -0,0 +1,432 @@
+# piker: trading gear for hackers
+# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+"""
+Super fast ``QPainterPath`` generation related operator routines.
+
+"""
+import numpy as np
+from numpy.lib import recfunctions as rfn
+from numba import (
+ # types,
+ njit,
+ float64,
+ int64,
+ # optional,
+)
+
+# TODO: for ``numba`` typing..
+# from ._source import numba_ohlc_dtype
+from ._m4 import ds_m4
+from .._profile import (
+ Profiler,
+ pg_profile_enabled,
+ ms_slower_then,
+)
+
+
+def xy_downsample(
+ x,
+ y,
+ uppx,
+
+ x_spacer: float = 0.5,
+
+) -> tuple[
+ np.ndarray,
+ np.ndarray,
+ float,
+ float,
+]:
+ '''
+ Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input
+    ``uppx`` (units-per-pixel) and add space between discrete datums.
+
+ '''
+    # downsample whenever more than 1 pixel per datum can be shown.
+ # always refresh data bounds until we get diffing
+ # working properly, see above..
+ bins, x, y, ymn, ymx = ds_m4(
+ x,
+ y,
+ uppx,
+ )
+
+ # flatten output to 1d arrays suitable for path-graphics generation.
+ x = np.broadcast_to(x[:, None], y.shape)
+ x = (x + np.array(
+ [-x_spacer, 0, 0, x_spacer]
+ )).flatten()
+ y = y.flatten()
+
+ return x, y, ymn, ymx
+
+
+@njit(
+ # NOTE: need to construct this manually for readonly
+ # arrays, see https://github.com/numba/numba/issues/4511
+ # (
+ # types.Array(
+ # numba_ohlc_dtype,
+ # 1,
+ # 'C',
+ # readonly=True,
+ # ),
+ # int64,
+ # types.unicode_type,
+ # optional(float64),
+ # ),
+ nogil=True
+)
+def path_arrays_from_ohlc(
+ data: np.ndarray,
+ start: int64,
+ bar_w: float64,
+ bar_gap: float64 = 0.16,
+ use_time_index: bool = True,
+
+ # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
+ # index_field: str,
+
+) -> tuple[
+ np.ndarray,
+ np.ndarray,
+ np.ndarray,
+]:
+ '''
+    Generate an array of line objects from input ohlc data.
+
+ '''
+ size = int(data.shape[0] * 6)
+
+ # XXX: see this for why the dtype might have to be defined outside
+ # the routine.
+ # https://github.com/numba/numba/issues/4098#issuecomment-493914533
+ x = np.zeros(
+ shape=size,
+ dtype=float64,
+ )
+ y, c = x.copy(), x.copy()
+
+ half_w: float = bar_w/2
+
+ # TODO: report bug for assert @
+ # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
+ for i, q in enumerate(data[start:], start):
+
+ open = q['open']
+ high = q['high']
+ low = q['low']
+ close = q['close']
+
+ if use_time_index:
+ index = float64(q['time'])
+ else:
+ index = float64(q['index'])
+
+ # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
+ # index = float64(q[index_field])
+ # AND this (probably)
+ # open, high, low, close, index = q[
+ # ['open', 'high', 'low', 'close', 'index']]
+
+ istart = i * 6
+ istop = istart + 6
+
+ # x,y detail the 6 points which connect all vertexes of a ohlc bar
+ mid: float = index + half_w
+ x[istart:istop] = (
+ index + bar_gap,
+ mid,
+ mid,
+ mid,
+ mid,
+ index + bar_w - bar_gap,
+ )
+ y[istart:istop] = (
+ open,
+ open,
+ low,
+ high,
+ close,
+ close,
+ )
+
+ # specifies that the first edge is never connected to the
+ # prior bars last edge thus providing a small "gap"/"space"
+ # between bars determined by ``bar_gap``.
+ c[istart:istop] = (1, 1, 1, 1, 1, 0)
+
+ return x, y, c
+
+
+def hl2mxmn(
+ ohlc: np.ndarray,
+ index_field: str = 'index',
+
+) -> np.ndarray:
+ '''
+    Convert an OHLC struct-array containing 'high'/'low' columns
+ to a "joined" max/min 1-d array.
+
+ '''
+ index = ohlc[index_field]
+ hls = ohlc[[
+ 'low',
+ 'high',
+ ]]
+
+ mxmn = np.empty(2*hls.size, dtype=np.float64)
+ x = np.empty(2*hls.size, dtype=np.float64)
+ trace_hl(hls, mxmn, x, index[0])
+ x = x + index[0]
+
+ return mxmn, x
+
+
+@njit(
+ # TODO: the type annots..
+ # float64[:](float64[:],),
+)
+def trace_hl(
+ hl: 'np.ndarray',
+ out: np.ndarray,
+ x: np.ndarray,
+ start: int,
+
+ # the "offset" values in the x-domain which
+ # place the 2 output points around each ``int``
+ # master index.
+ margin: float = 0.43,
+
+) -> None:
+ '''
+ "Trace" the outline of the high-low values of an ohlc sequence
+    as a line such that the maximum deviation (aka dispersion) between
+    bars is preserved.
+
+ This routine is expected to modify input arrays in-place.
+
+ '''
+ last_l = hl['low'][0]
+ last_h = hl['high'][0]
+
+ for i in range(hl.size):
+ row = hl[i]
+ l, h = row['low'], row['high']
+
+ up_diff = h - last_l
+ down_diff = last_h - l
+
+ if up_diff > down_diff:
+ out[2*i + 1] = h
+ out[2*i] = last_l
+ else:
+ out[2*i + 1] = l
+ out[2*i] = last_h
+
+ last_l = l
+ last_h = h
+
+ x[2*i] = int(i) - margin
+ x[2*i + 1] = int(i) + margin
+
+ return out
+
+
+def ohlc_flatten(
+ ohlc: np.ndarray,
+ use_mxmn: bool = True,
+ index_field: str = 'index',
+
+) -> tuple[np.ndarray, np.ndarray]:
+ '''
+ Convert an OHLCV struct-array into a flat ready-for-line-plotting
+ 1-d array that is 4 times the size with x-domain values distributed
+ evenly (by 0.5 steps) over each index.
+
+ '''
+ index = ohlc[index_field]
+
+ if use_mxmn:
+ # traces a line optimally over highs to lows
+ # using numba. NOTE: pretty sure this is faster
+ # and looks about the same as the below output.
+ flat, x = hl2mxmn(ohlc)
+
+ else:
+ flat = rfn.structured_to_unstructured(
+ ohlc[['open', 'high', 'low', 'close']]
+ ).flatten()
+
+ x = np.linspace(
+ start=index[0] - 0.5,
+ stop=index[-1] + 0.5,
+ num=len(flat),
+ )
+ return x, flat
+
+
+def slice_from_time(
+ arr: np.ndarray,
+ start_t: float,
+ stop_t: float,
+ step: int | None = None,
+
+) -> tuple[
+ slice,
+ slice,
+]:
+ '''
+ Calculate array indices mapped from a time range and return them in
+ a slice.
+
+ Given an input array with an epoch `'time'` series entry, calculate
+ the indices which span the time range and return in a slice. Presume
+ each `'time'` step increment is uniform and when the time stamp
+ series contains gaps (the uniform presumption is untrue) use
+ ``np.searchsorted()`` binary search to look up the appropriate
+ index.
+
+ '''
+ profiler = Profiler(
+ msg='slice_from_time()',
+ disabled=not pg_profile_enabled(),
+ ms_threshold=ms_slower_then,
+ )
+
+ times = arr['time']
+ t_first = round(times[0])
+
+ read_i_max = arr.shape[0]
+
+ if step is None:
+ step = round(times[-1] - times[-2])
+ if step == 0:
+ # XXX: HOW TF is this happening?
+ step = 1
+
+ # compute (presumed) uniform-time-step index offsets
+ i_start_t = round(start_t)
+ read_i_start = round(((i_start_t - t_first) // step)) - 1
+
+ i_stop_t = round(stop_t)
+ read_i_stop = round((i_stop_t - t_first) // step) + 1
+
+ # always clip outputs to array support
+ # for read start:
+ # - never allow a start < the 0 index
+ # - never allow an end index > the read array len
+ read_i_start = min(
+ max(0, read_i_start),
+ read_i_max - 1,
+ )
+ read_i_stop = max(
+ 0,
+ min(read_i_stop, read_i_max),
+ )
+
+ # check for larger-then-latest calculated index for given start
+ # time, in which case we do a binary search for the correct index.
+ # NOTE: this is usually the result of a time series with time gaps
+ # where it is expected that each index step maps to a uniform step
+ # in the time stamp series.
+ t_iv_start = times[read_i_start]
+ if (
+ t_iv_start > i_start_t
+ ):
+ # do a binary search for the best index mapping to ``start_t``
+ # given we measured an overshoot using the uniform-time-step
+ # calculation from above.
+
+ # TODO: once we start caching these per source-array,
+ # we can just overwrite ``read_i_start`` directly.
+ new_read_i_start = np.searchsorted(
+ times,
+ i_start_t,
+ side='left',
+ )
+
+ # TODO: minimize binary search work as much as possible:
+ # - cache these remap values which compensate for gaps in the
+ # uniform time step basis where we calc a later start
+ # index for the given input ``start_t``.
+ # - can we shorten the input search sequence by heuristic?
+ # up_to_arith_start = index[:read_i_start]
+
+ if (
+ new_read_i_start < read_i_start
+ ):
+ # t_diff = t_iv_start - start_t
+ # print(
+ # f"WE'RE CUTTING OUT TIME - STEP:{step}\n"
+ # f'start_t:{start_t} -> 0index start_t:{t_iv_start}\n'
+ # f'diff: {t_diff}\n'
+ # f'REMAPPED START i: {read_i_start} -> {new_read_i_start}\n'
+ # )
+ read_i_start = new_read_i_start - 1
+
+ t_iv_stop = times[read_i_stop - 1]
+ if (
+ t_iv_stop > i_stop_t
+ ):
+ # t_diff = stop_t - t_iv_stop
+ # print(
+ # f"WE'RE CUTTING OUT TIME - STEP:{step}\n"
+ # f'calced iv stop:{t_iv_stop} -> stop_t:{stop_t}\n'
+ # f'diff: {t_diff}\n'
+ # # f'SHOULD REMAP STOP: {read_i_start} -> {new_read_i_start}\n'
+ # )
+ new_read_i_stop = np.searchsorted(
+ times[read_i_start:],
+ i_stop_t,
+ side='left',
+ )
+
+ if (
+ new_read_i_stop < read_i_stop
+ ):
+ read_i_stop = read_i_start + new_read_i_stop
+
+ # sanity checks for range size
+ # samples = (i_stop_t - i_start_t) // step
+ # index_diff = read_i_stop - read_i_start + 1
+ # if index_diff > (samples + 3):
+ # breakpoint()
+
+ # read-relative indexes: gives a slice where `shm.array[read_slc]`
+ # will be the data spanning the input time range `start_t` ->
+ # `stop_t`
+ read_slc = slice(
+ int(read_i_start),
+ int(read_i_stop),
+ )
+
+ profiler(
+ 'slicing complete'
+ # f'{start_t} -> {abs_slc.start} | {read_slc.start}\n'
+ # f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n'
+ )
+
+ # NOTE: if caller needs absolute buffer indices they can
+ # slice the buffer abs index like so:
+ # index = arr['index']
+ # abs_indx = index[read_slc]
+ # abs_slc = slice(
+ # int(abs_indx[0]),
+ # int(abs_indx[-1]),
+ # )
+
+ return read_slc
diff --git a/piker/data/flows.py b/piker/data/flows.py
index 9bb27230..01ed7851 100644
--- a/piker/data/flows.py
+++ b/piker/data/flows.py
@@ -48,9 +48,13 @@ from ._sharedmem import (
from ._sampling import (
open_sample_stream,
)
+# from .._profile import (
+# Profiler,
+# pg_profile_enabled,
+# )
if TYPE_CHECKING:
- from pyqtgraph import PlotItem
+ # from pyqtgraph import PlotItem
from .feed import Feed
@@ -218,104 +222,18 @@ class Flume(Struct):
def get_index(
self,
time_s: float,
+ array: np.ndarray,
- ) -> int:
+ ) -> int | float:
'''
Return array shm-buffer index for for epoch time.
'''
- array = self.rt_shm.array
times = array['time']
- mask = (times >= time_s)
-
- if any(mask):
- return array['index'][mask][0]
-
- # just the latest index
- array['index'][-1]
-
- def slice_from_time(
- self,
- array: np.ndarray,
- start_t: float,
- stop_t: float,
- timeframe_s: int = 1,
- return_data: bool = False,
-
- ) -> np.ndarray:
- '''
- Slice an input struct array providing only datums
- "in view" of this chart.
-
- '''
- arr = {
- 1: self.rt_shm.array,
- 60: self.hist_shm.arry,
- }[timeframe_s]
-
- times = arr['time']
- index = array['index']
-
- # use advanced indexing to map the
- # time range to the index range.
- mask = (
- (times >= start_t)
- &
- (times < stop_t)
+ first = np.searchsorted(
+ times,
+ time_s,
+ side='left',
)
-
- # TODO: if we can ensure each time field has a uniform
- # step we can instead do some arithmetic to determine
- # the equivalent index like we used to?
- # return array[
- # lbar - ifirst:
- # (rbar - ifirst) + 1
- # ]
-
- i_by_t = index[mask]
- i_0 = i_by_t[0]
-
- abs_slc = slice(
- i_0,
- i_by_t[-1],
- )
- # slice data by offset from the first index
- # available in the passed datum set.
- read_slc = slice(
- 0,
- i_by_t[-1] - i_0,
- )
- if not return_data:
- return (
- abs_slc,
- read_slc,
- )
-
- # also return the readable data from the timerange
- return (
- abs_slc,
- read_slc,
- arr[mask],
- )
-
- def view_data(
- self,
- plot: PlotItem,
- timeframe_s: int = 1,
-
- ) -> np.ndarray:
-
- # get far-side x-indices plot view
- vr = plot.viewRect()
-
- (
- abs_slc,
- buf_slc,
- iv_arr,
- ) = self.slice_from_time(
- start_t=vr.left(),
- stop_t=vr.right(),
- timeframe_s=timeframe_s,
- return_data=True,
- )
- return iv_arr
+ imx = times.shape[0] - 1
+ return min(first, imx)
diff --git a/piker/fsp/_engine.py b/piker/fsp/_engine.py
index a78308a4..37852cfc 100644
--- a/piker/fsp/_engine.py
+++ b/piker/fsp/_engine.py
@@ -188,6 +188,8 @@ async def fsp_compute(
history_by_field['time'] = src_time[-len(history_by_field):]
+ history_output['time'] = src.array['time']
+
# TODO: XXX:
# THERE'S A BIG BUG HERE WITH THE `index` field since we're
# prepending a copy of the first value a few times to make
diff --git a/piker/ui/_app.py b/piker/ui/_app.py
index 2743103e..3be073e7 100644
--- a/piker/ui/_app.py
+++ b/piker/ui/_app.py
@@ -178,8 +178,7 @@ def _main(
tractor_kwargs,
) -> None:
'''
- Sync entry point to start a chart: a ``tractor`` + Qt runtime
- entry point
+ Sync entry point to start a chart: a ``tractor`` + Qt runtime.
'''
run_qtractor(
diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py
index 2ee60bb9..4d197f5a 100644
--- a/piker/ui/_axes.py
+++ b/piker/ui/_axes.py
@@ -49,7 +49,7 @@ class Axis(pg.AxisItem):
def __init__(
self,
plotitem: pgo.PlotItem,
- typical_max_str: str = '100 000.000',
+ typical_max_str: str = '100 000.000 ',
text_color: str = 'bracket',
lru_cache_tick_strings: bool = True,
**kwargs
@@ -95,9 +95,10 @@ class Axis(pg.AxisItem):
self.setPen(_axis_pen)
# this is the text color
- # self.setTextPen(pg.mkPen(hcolor(text_color)))
self.text_color = text_color
+ # generate a bounding rect based on sizing to a "typical"
+ # maximum length-ed string defined as init default.
self.typical_br = _font._qfm.boundingRect(typical_max_str)
# size the pertinent axis dimension to a "typical value"
@@ -154,8 +155,8 @@ class Axis(pg.AxisItem):
pi: pgo.PlotItem,
name: None | str = None,
digits: None | int = 2,
- # axis_name: str = 'right',
- bg_color='bracket',
+ bg_color='default',
+ fg_color='black',
) -> YAxisLabel:
@@ -165,22 +166,20 @@ class Axis(pg.AxisItem):
digits = digits or 2
# TODO: ``._ysticks`` should really be an attr on each
- # ``PlotItem`` no instead of the (containing because of
- # overlays) widget?
+ # ``PlotItem`` now instead of the containing widget (because of
+ # overlays) ?
# add y-axis "last" value label
sticky = self._stickies[name] = YAxisLabel(
pi=pi,
parent=self,
- # TODO: pass this from symbol data
- digits=digits,
- opacity=1,
+ digits=digits, # TODO: pass this from symbol data
+ opacity=0.9, # slight see-through
bg_color=bg_color,
+ fg_color=fg_color,
)
pi.sigRangeChanged.connect(sticky.update_on_resize)
- # pi.addItem(sticky)
- # pi.addItem(last)
return sticky
@@ -244,7 +243,6 @@ class PriceAxis(Axis):
self._min_tick = size
def size_to_values(self) -> None:
- # self.typical_br = _font._qfm.boundingRect(typical_max_str)
self.setWidth(self.typical_br.width())
# XXX: drop for now since it just eats up h space
@@ -302,27 +300,44 @@ class DynamicDateAxis(Axis):
# XX: ARGGGGG AG:LKSKDJF:LKJSDFD
chart = self.pi.chart_widget
- flow = chart._flows[chart.name]
- shm = flow.shm
- bars = shm.array
- first = shm._first.value
+ viz = chart._vizs[chart.name]
+ shm = viz.shm
+ array = shm.array
+ times = array['time']
+ i_0, i_l = times[0], times[-1]
- bars_len = len(bars)
- times = bars['time']
+ if (
+ (indexes[0] < i_0
+ and indexes[-1] < i_l)
+ or
+ (indexes[0] > i_0
+ and indexes[-1] > i_l)
+ ):
+ return []
- epochs = times[list(
- map(
- int,
- filter(
- lambda i: i > 0 and i < bars_len,
- (i-first for i in indexes)
+ if viz.index_field == 'index':
+ arr_len = times.shape[0]
+ first = shm._first.value
+ epochs = times[
+ list(
+ map(
+ int,
+ filter(
+ lambda i: i > 0 and i < arr_len,
+ (i - first for i in indexes)
+ )
+ )
)
- )
- )]
+ ]
+ else:
+ epochs = list(map(int, indexes))
# TODO: **don't** have this hard coded shift to EST
# delay = times[-1] - times[-2]
- dts = np.array(epochs, dtype='datetime64[s]')
+ dts = np.array(
+ epochs,
+ dtype='datetime64[s]',
+ )
# see units listing:
# https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units
@@ -340,24 +355,39 @@ class DynamicDateAxis(Axis):
spacing: float,
) -> list[str]:
+
+ return self._indexes_to_timestrs(values)
+
+ # NOTE: handy for debugging the lru cache
# info = self.tickStrings.cache_info()
# print(info)
- return self._indexes_to_timestrs(values)
class AxisLabel(pg.GraphicsObject):
- _x_margin = 0
- _y_margin = 0
+ # relative offsets *OF* the bounding rect relative
+ # to parent graphics object.
+ # eg. | => <_x_br_offset> => | |
+ _x_br_offset: float = 0
+ _y_br_offset: float = 0
+
+ # relative offsets of text *within* bounding rect
+ # eg. | <_x_margin> => |
+ _x_margin: float = 0
+ _y_margin: float = 0
+
+ # multiplier of the text content's height in order
+ # to force a larger (y-dimension) bounding rect.
+ _y_txt_h_scaling: float = 1
def __init__(
self,
parent: pg.GraphicsItem,
digits: int = 2,
- bg_color: str = 'bracket',
+ bg_color: str = 'default',
fg_color: str = 'black',
- opacity: int = 1, # XXX: seriously don't set this to 0
+ opacity: int = .8, # XXX: seriously don't set this to 0
font_size: str = 'default',
use_arrow: bool = True,
@@ -368,6 +398,7 @@ class AxisLabel(pg.GraphicsObject):
self.setParentItem(parent)
self.setFlag(self.ItemIgnoresTransformations)
+ self.setZValue(100)
# XXX: pretty sure this is faster
self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
@@ -399,14 +430,14 @@ class AxisLabel(pg.GraphicsObject):
p: QtGui.QPainter,
opt: QtWidgets.QStyleOptionGraphicsItem,
w: QtWidgets.QWidget
+
) -> None:
- """Draw a filled rectangle based on the size of ``.label_str`` text.
+ '''
+ Draw a filled rectangle based on the size of ``.label_str`` text.
Subtypes can customize further by overloading ``.draw()``.
- """
- # p.setCompositionMode(QtWidgets.QPainter.CompositionMode_SourceOver)
-
+ '''
if self.label_str:
# if not self.rect:
@@ -417,7 +448,11 @@ class AxisLabel(pg.GraphicsObject):
p.setFont(self._dpifont.font)
p.setPen(self.fg_color)
- p.drawText(self.rect, self.text_flags, self.label_str)
+ p.drawText(
+ self.rect,
+ self.text_flags,
+ self.label_str,
+ )
def draw(
self,
@@ -425,6 +460,8 @@ class AxisLabel(pg.GraphicsObject):
rect: QtCore.QRectF
) -> None:
+ p.setOpacity(self.opacity)
+
if self._use_arrow:
if not self.path:
self._draw_arrow_path()
@@ -432,15 +469,13 @@ class AxisLabel(pg.GraphicsObject):
p.drawPath(self.path)
p.fillPath(self.path, pg.mkBrush(self.bg_color))
- # this adds a nice black outline around the label for some odd
- # reason; ok by us
- p.setOpacity(self.opacity)
-
# this cause the L1 labels to glitch out if used in the subtype
# and it will leave a small black strip with the arrow path if
# done before the above
- p.fillRect(self.rect, self.bg_color)
-
+ p.fillRect(
+ self.rect,
+ self.bg_color,
+ )
def boundingRect(self): # noqa
'''
@@ -484,15 +519,18 @@ class AxisLabel(pg.GraphicsObject):
txt_h, txt_w = txt_br.height(), txt_br.width()
# print(f'wsw: {self._dpifont.boundingRect(" ")}')
- # allow subtypes to specify a static width and height
+ # allow subtypes to override width and height
h, w = self.size_hint()
- # print(f'axis size: {self._parent.size()}')
- # print(f'axis geo: {self._parent.geometry()}')
self.rect = QtCore.QRectF(
- 0, 0,
+
+ # relative bounds offsets
+ self._x_br_offset,
+ self._y_br_offset,
+
(w or txt_w) + self._x_margin / 2,
- (h or txt_h) + self._y_margin / 2,
+
+ (h or txt_h) * self._y_txt_h_scaling + (self._y_margin / 2),
)
# print(self.rect)
# hb = self.path.controlPointRect()
diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py
index bfe1c110..14921b3d 100644
--- a/piker/ui/_chart.py
+++ b/piker/ui/_chart.py
@@ -60,7 +60,7 @@ from ._style import (
hcolor,
CHART_MARGINS,
_xaxis_at,
- _min_points_to_show,
+ # _min_points_to_show,
)
from ..data.feed import (
Feed,
@@ -72,7 +72,7 @@ from ._interaction import ChartView
from ._forms import FieldsForm
from .._profile import pg_profile_enabled, ms_slower_then
from ._overlay import PlotItemOverlay
-from ._flows import Flow
+from ._dataviz import Viz
from ._search import SearchWidget
from . import _pg_overrides as pgo
from .._profile import Profiler
@@ -711,7 +711,7 @@ class LinkedSplits(QWidget):
if style == 'ohlc_bar':
# graphics, data_key = cpw.draw_ohlc(
- flow = cpw.draw_ohlc(
+ viz = cpw.draw_ohlc(
name,
shm,
flume=flume,
@@ -727,7 +727,7 @@ class LinkedSplits(QWidget):
elif style == 'line':
add_label = True
# graphics, data_key = cpw.draw_curve(
- flow = cpw.draw_curve(
+ viz = cpw.draw_curve(
name,
shm,
flume,
@@ -738,7 +738,7 @@ class LinkedSplits(QWidget):
elif style == 'step':
add_label = True
# graphics, data_key = cpw.draw_curve(
- flow = cpw.draw_curve(
+ viz = cpw.draw_curve(
name,
shm,
flume,
@@ -751,8 +751,8 @@ class LinkedSplits(QWidget):
else:
raise ValueError(f"Chart style {style} is currently unsupported")
- graphics = flow.graphics
- data_key = flow.name
+ graphics = viz.graphics
+ data_key = viz.name
if _is_main:
assert style == 'ohlc_bar', 'main chart must be OHLC'
@@ -810,6 +810,8 @@ class LinkedSplits(QWidget):
self.chart.sidepane.setMinimumWidth(sp_w)
+# TODO: we should really drop using this type and instead just
+# write our own wrapper around `PlotItem`..
class ChartPlotWidget(pg.PlotWidget):
'''
``GraphicsView`` subtype containing a single ``PlotItem``.
@@ -908,7 +910,7 @@ class ChartPlotWidget(pg.PlotWidget):
# self.setViewportMargins(0, 0, 0, 0)
# registry of overlay curve names
- self._flows: dict[str, Flow] = {}
+ self._vizs: dict[str, Viz] = {}
self.feed: Feed | None = None
@@ -921,8 +923,6 @@ class ChartPlotWidget(pg.PlotWidget):
# show background grid
self.showGrid(x=False, y=True, alpha=0.3)
- self.cv.enable_auto_yrange()
-
self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem)
# indempotent startup flag for auto-yrange subsys
@@ -951,41 +951,6 @@ class ChartPlotWidget(pg.PlotWidget):
def focus(self) -> None:
self.view.setFocus()
- def _set_xlimits(
- self,
- xfirst: int,
- xlast: int
- ) -> None:
- """Set view limits (what's shown in the main chart "pane")
- based on max/min x/y coords.
- """
- self.setLimits(
- xMin=xfirst,
- xMax=xlast,
- minXRange=_min_points_to_show,
- )
-
- def view_range(self) -> tuple[int, int]:
- vr = self.viewRect()
- return int(vr.left()), int(vr.right())
-
- def bars_range(self) -> tuple[int, int, int, int]:
- '''
- Return a range tuple for the bars present in view.
-
- '''
- main_flow = self._flows[self.name]
- ifirst, l, lbar, rbar, r, ilast = main_flow.datums_range()
- return l, lbar, rbar, r
-
- def curve_width_pxs(
- self,
- ) -> float:
- _, lbar, rbar, _ = self.bars_range()
- return self.view.mapViewToDevice(
- QLineF(lbar, 0, rbar, 0)
- ).length()
-
def pre_l1_xs(self) -> tuple[float, float]:
'''
Return the view x-coord for the value just before
@@ -994,11 +959,16 @@ class ChartPlotWidget(pg.PlotWidget):
'''
line_end, marker_right, yaxis_x = self.marker_right_points()
- view = self.view
- line = view.mapToView(
+ line = self.view.mapToView(
QLineF(line_end, 0, yaxis_x, 0)
)
- return line.x1(), line.length()
+ linex, linelen = line.x1(), line.length()
+ # print(
+ # f'line: {line}\n'
+ # f'linex: {linex}\n'
+ # f'linelen: {linelen}\n'
+ # )
+ return linex, linelen
def marker_right_points(
self,
@@ -1020,11 +990,16 @@ class ChartPlotWidget(pg.PlotWidget):
ryaxis = self.getAxis('right')
r_axis_x = ryaxis.pos().x()
- up_to_l1_sc = r_axis_x - l1_len - 10
-
+ up_to_l1_sc = r_axis_x - l1_len
marker_right = up_to_l1_sc - (1.375 * 2 * marker_size)
line_end = marker_right - (6/16 * marker_size)
+ # print(
+ # f'r_axis_x: {r_axis_x}\n'
+ # f'up_to_l1_sc: {up_to_l1_sc}\n'
+ # f'marker_right: {marker_right}\n'
+ # f'line_end: {line_end}\n'
+ # )
return line_end, marker_right, r_axis_x
def default_view(
@@ -1038,95 +1013,45 @@ class ChartPlotWidget(pg.PlotWidget):
Set the view box to the "default" startup view of the scene.
'''
- flow = self._flows.get(self.name)
- if not flow:
- log.warning(f'`Flow` for {self.name} not loaded yet?')
+ viz = self.get_viz(self.name)
+
+ if not viz:
+ log.warning(f'`Viz` for {self.name} not loaded yet?')
return
- arr = flow.shm.array
- index = arr['index']
- # times = arr['time']
-
- # these will be epoch time floats
- xfirst, xlast = index[0], index[-1]
- l, lbar, rbar, r = self.bars_range()
-
- view = self.view
-
- if (
- rbar < 0
- or l < xfirst
- or l < 0
- or (rbar - lbar) < 6
- ):
- # TODO: set fixed bars count on screen that approx includes as
- # many bars as possible before a downsample line is shown.
- begin = xlast - bars_from_y
- view.setXRange(
- min=begin,
- max=xlast,
- padding=0,
- )
- # re-get range
- l, lbar, rbar, r = self.bars_range()
-
- # we get the L1 spread label "length" in view coords
- # terms now that we've scaled either by user control
- # or to the default set of bars as per the immediate block
- # above.
- if not y_offset:
- marker_pos, l1_len = self.pre_l1_xs()
- end = xlast + l1_len + 1
- else:
- end = xlast + y_offset + 1
-
- begin = end - (r - l)
-
- # for debugging
- # print(
- # # f'bars range: {brange}\n'
- # f'xlast: {xlast}\n'
- # f'marker pos: {marker_pos}\n'
- # f'l1 len: {l1_len}\n'
- # f'begin: {begin}\n'
- # f'end: {end}\n'
- # )
-
- # remove any custom user yrange setttings
- if self._static_yrange == 'axis':
- self._static_yrange = None
-
- view.setXRange(
- min=begin,
- max=end,
- padding=0,
+ viz.default_view(
+ bars_from_y,
+ y_offset,
+ do_ds,
)
if do_ds:
- self.view.maybe_downsample_graphics()
- view._set_yrange()
-
- try:
self.linked.graphics_cycle()
- except IndexError:
- pass
def increment_view(
self,
- steps: int = 1,
+ datums: int = 1,
vb: Optional[ChartView] = None,
) -> None:
- """
- Increment the data view one step to the right thus "following"
- the current time slot/step/bar.
+ '''
+    Increment the data view ``datums`` steps toward the y-axis thus
+ "following" the current time slot/step/bar.
- """
- l, r = self.view_range()
+ '''
view = vb or self.view
+ viz = self.main_viz
+ l, r = viz.view_range()
+ x_shift = viz.index_step() * datums
+
+ if datums >= 300:
+ print("FUCKING FIX THE GLOBAL STEP BULLSHIT")
+ # breakpoint()
+ return
+
view.setXRange(
- min=l + steps,
- max=r + steps,
+ min=l + x_shift,
+ max=r + x_shift,
# TODO: holy shit, wtf dude... why tf would this not be 0 by
# default... speechless.
@@ -1220,7 +1145,7 @@ class ChartPlotWidget(pg.PlotWidget):
**graphics_kwargs,
- ) -> Flow:
+ ) -> Viz:
'''
Draw a "curve" (line plot graphics) for the provided data in
the input shm array ``shm``.
@@ -1254,17 +1179,17 @@ class ChartPlotWidget(pg.PlotWidget):
**graphics_kwargs,
)
- flow = self._flows[data_key] = Flow(
+ viz = self._vizs[data_key] = Viz(
data_key,
pi,
shm,
flume,
is_ohlc=is_ohlc,
- # register curve graphics with this flow
+ # register curve graphics with this viz
graphics=graphics,
)
- assert isinstance(flow.shm, ShmArray)
+ assert isinstance(viz.shm, ShmArray)
# TODO: this probably needs its own method?
if overlay:
@@ -1321,7 +1246,7 @@ class ChartPlotWidget(pg.PlotWidget):
# understand.
pi.addItem(graphics)
- return flow
+ return viz
def draw_ohlc(
self,
@@ -1332,7 +1257,7 @@ class ChartPlotWidget(pg.PlotWidget):
array_key: Optional[str] = None,
**draw_curve_kwargs,
- ) -> Flow:
+ ) -> Viz:
'''
Draw OHLC datums to chart.
@@ -1358,41 +1283,12 @@ class ChartPlotWidget(pg.PlotWidget):
Update the named internal graphics from ``array``.
'''
- flow = self._flows[array_key or graphics_name]
- return flow.update_graphics(
+ viz = self._vizs[array_key or graphics_name]
+ return viz.update_graphics(
array_key=array_key,
**kwargs,
)
- # def _label_h(self, yhigh: float, ylow: float) -> float:
- # # compute contents label "height" in view terms
- # # to avoid having data "contents" overlap with them
- # if self._labels:
- # label = self._labels[self.name][0]
-
- # rect = label.itemRect()
- # tl, br = rect.topLeft(), rect.bottomRight()
- # vb = self.plotItem.vb
-
- # try:
- # # on startup labels might not yet be rendered
- # top, bottom = (vb.mapToView(tl).y(), vb.mapToView(br).y())
-
- # # XXX: magic hack, how do we compute exactly?
- # label_h = (top - bottom) * 0.42
-
- # except np.linalg.LinAlgError:
- # label_h = 0
- # else:
- # label_h = 0
-
- # # print(f'label height {self.name}: {label_h}')
-
- # if label_h > yhigh - ylow:
- # label_h = 0
-
- # print(f"bounds (ylow, yhigh): {(ylow, yhigh)}")
-
# TODO: pretty sure we can just call the cursor
# directly not? i don't wee why we need special "signal proxies"
# for this lul..
@@ -1426,36 +1322,34 @@ class ChartPlotWidget(pg.PlotWidget):
delayed=True,
)
- # TODO: here we should instead look up the ``Flow.shm.array``
+ # TODO: here we should instead look up the ``Viz.shm.array``
# and read directly from shm to avoid copying to memory first
# and then reading it again here.
- flow_key = name or self.name
- flow = self._flows.get(flow_key)
- if (
- flow is None
- ):
- log.error(f"flow {flow_key} doesn't exist in chart {self.name} !?")
+ viz_key = name or self.name
+ viz = self._vizs.get(viz_key)
+ if viz is None:
+ log.error(f"viz {viz_key} doesn't exist in chart {self.name} !?")
key = res = 0, 0
else:
(
- first,
l,
+ _,
lbar,
rbar,
+ _,
r,
- last,
- ) = bars_range or flow.datums_range()
- profiler(f'{self.name} got bars range')
+ ) = bars_range or viz.datums_range()
- key = round(lbar), round(rbar)
- res = flow.maxmin(*key)
+ profiler(f'{self.name} got bars range')
+ key = lbar, rbar
+ res = viz.maxmin(*key)
if (
res is None
):
log.warning(
- f"{flow_key} no mxmn for bars_range => {key} !?"
+ f"{viz_key} no mxmn for bars_range => {key} !?"
)
res = 0, 0
if not self._on_screen:
@@ -1463,5 +1357,19 @@ class ChartPlotWidget(pg.PlotWidget):
self._on_screen = True
profiler(f'yrange mxmn: {key} -> {res}')
- # print(f'{flow_key} yrange mxmn: {key} -> {res}')
+ # print(f'{viz_key} yrange mxmn: {key} -> {res}')
return res
+
+ def get_viz(
+ self,
+ key: str,
+ ) -> Viz:
+ '''
+ Try to get an underlying ``Viz`` by key.
+
+ '''
+ return self._vizs.get(key)
+
+ @property
+ def main_viz(self) -> Viz:
+ return self.get_viz(self.name)
diff --git a/piker/ui/_cursor.py b/piker/ui/_cursor.py
index fd00c380..762acf73 100644
--- a/piker/ui/_cursor.py
+++ b/piker/ui/_cursor.py
@@ -274,8 +274,8 @@ class ContentsLabels:
) -> None:
for chart, name, label, update in self._labels:
- flow = chart._flows[name]
- array = flow.shm.array
+ viz = chart.get_viz(name)
+ array = viz.shm.array
if not (
index >= 0
@@ -482,25 +482,32 @@ class Cursor(pg.GraphicsObject):
def add_curve_cursor(
self,
- plot: ChartPlotWidget, # noqa
+ chart: ChartPlotWidget, # noqa
curve: 'PlotCurveItem', # noqa
) -> LineDot:
- # if this plot contains curves add line dot "cursors" to denote
+ # if this chart contains curves add line dot "cursors" to denote
# the current sample under the mouse
- main_flow = plot._flows[plot.name]
+ main_viz = chart.get_viz(chart.name)
+
# read out last index
- i = main_flow.shm.array[-1]['index']
+ i = main_viz.shm.array[-1]['index']
cursor = LineDot(
curve,
index=i,
- plot=plot
+ plot=chart
)
- plot.addItem(cursor)
- self.graphics[plot].setdefault('cursors', []).append(cursor)
+ chart.addItem(cursor)
+ self.graphics[chart].setdefault('cursors', []).append(cursor)
return cursor
- def mouseAction(self, action, plot): # noqa
+ def mouseAction(
+ self,
+ action: str,
+ plot: ChartPlotWidget,
+
+ ) -> None: # noqa
+
log.debug(f"{(action, plot.name)}")
if action == 'Enter':
self.active_plot = plot
diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py
index b9a143a2..a3287341 100644
--- a/piker/ui/_curve.py
+++ b/piker/ui/_curve.py
@@ -36,10 +36,6 @@ from PyQt5.QtGui import (
)
from .._profile import pg_profile_enabled, ms_slower_then
from ._style import hcolor
-# from ._compression import (
-# # ohlc_to_m4_line,
-# ds_m4,
-# )
from ..log import get_logger
from .._profile import Profiler
@@ -55,7 +51,39 @@ _line_styles: dict[str, int] = {
}
-class Curve(pg.GraphicsObject):
+class FlowGraphic(pg.GraphicsObject):
+ '''
+ Base class with minimal interface for `QPainterPath` implemented,
+ real-time updated "data flow" graphics.
+
+ See subtypes below.
+
+ '''
+ # sub-type customization methods
+ declare_paintables: Optional[Callable] = None
+ sub_paint: Optional[Callable] = None
+
+ # TODO: can we remove this?
+ # sub_br: Optional[Callable] = None
+
+ def x_uppx(self) -> int:
+
+ px_vecs = self.pixelVectors()[0]
+ if px_vecs:
+ return px_vecs.x()
+ else:
+ return 0
+
+ def x_last(self) -> float | None:
+ '''
+ Return the last most x value of the last line segment or if not
+ drawn yet, ``None``.
+
+ '''
+ return self._last_line.x1() if self._last_line else None
+
+
+class Curve(FlowGraphic):
'''
A faster, simpler, append friendly version of
``pyqtgraph.PlotCurveItem`` built for highly customizable real-time
@@ -72,7 +100,7 @@ class Curve(pg.GraphicsObject):
lower level graphics data can be rendered in different threads and
then read and drawn in this main thread without having to worry
about dealing with Qt's concurrency primitives. See
- ``piker.ui._flows.Renderer`` for details and logic related to lower
+ ``piker.ui._render.Renderer`` for details and logic related to lower
level path generation and incremental update. The main differences in
the path generation code include:
@@ -85,11 +113,6 @@ class Curve(pg.GraphicsObject):
'''
- # sub-type customization methods
- declare_paintables: Optional[Callable] = None
- sub_paint: Optional[Callable] = None
- # sub_br: Optional[Callable] = None
-
def __init__(
self,
*args,
@@ -99,7 +122,6 @@ class Curve(pg.GraphicsObject):
fill_color: Optional[str] = None,
style: str = 'solid',
name: Optional[str] = None,
- use_fpath: bool = True,
**kwargs
@@ -114,11 +136,11 @@ class Curve(pg.GraphicsObject):
# self._last_cap: int = 0
self.path: Optional[QPainterPath] = None
- # additional path used for appends which tries to avoid
- # triggering an update/redraw of the presumably larger
- # historical ``.path`` above.
- self.use_fpath = use_fpath
- self.fast_path: Optional[QPainterPath] = None
+ # additional path that can be optionally used for appends which
+ # tries to avoid triggering an update/redraw of the presumably
+ # larger historical ``.path`` above. the flag to enable
+ # this behaviour is found in `Renderer.render()`.
+ self.fast_path: QPainterPath | None = None
# TODO: we can probably just dispense with the parent since
# we're basically only using the pen setting now...
@@ -137,7 +159,7 @@ class Curve(pg.GraphicsObject):
# self.last_step_pen = pg.mkPen(hcolor(color), width=2)
self.last_step_pen = pg.mkPen(pen, width=2)
- self._last_line = QLineF()
+ self._last_line: QLineF = QLineF()
# flat-top style histogram-like discrete curve
# self._step_mode: bool = step_mode
@@ -158,51 +180,19 @@ class Curve(pg.GraphicsObject):
# endpoint (something we saw on trade rate curves)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
- # XXX: see explanation for different caching modes:
- # https://stackoverflow.com/a/39410081
- # seems to only be useful if we don't re-generate the entire
- # QPainterPath every time
- # curve.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
-
+ # XXX-NOTE-XXX: graphics caching.
+ # see explanation for different caching modes:
+ # https://stackoverflow.com/a/39410081 seems to only be useful
+ # if we don't re-generate the entire QPainterPath every time
# don't ever use this - it's a colossal nightmare of artefacts
# and is disastrous for performance.
- # curve.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache)
+ # self.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache)
# allow sub-type customization
declare = self.declare_paintables
if declare:
declare()
- # TODO: probably stick this in a new parent
- # type which will contain our own version of
- # what ``PlotCurveItem`` had in terms of base
- # functionality? A `FlowGraphic` maybe?
- def x_uppx(self) -> int:
-
- px_vecs = self.pixelVectors()[0]
- if px_vecs:
- xs_in_px = px_vecs.x()
- return round(xs_in_px)
- else:
- return 0
-
- def px_width(self) -> float:
-
- vb = self.getViewBox()
- if not vb:
- return 0
-
- vr = self.viewRect()
- l, r = int(vr.left()), int(vr.right())
-
- start, stop = self._xrange
- lbar = max(l, start)
- rbar = min(r, stop)
-
- return vb.mapViewToDevice(
- QLineF(lbar, 0, rbar, 0)
- ).length()
-
# XXX: lol brutal, the internals of `CurvePoint` (inherited by
# our `LineDot`) required ``.getData()`` to work..
def getData(self):
@@ -357,22 +347,30 @@ class Curve(pg.GraphicsObject):
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
+ index_field: str,
) -> None:
# default line draw last call
# with self.reset_cache():
- x = render_data['index']
- y = render_data[array_key]
+ x = src_data[index_field]
+ y = src_data[array_key]
+
+ x_last = x[-1]
+ x_2last = x[-2]
# draw the "current" step graphic segment so it
# lines up with the "middle" of the current
# (OHLC) sample.
self._last_line = QLineF(
- x[-2], y[-2],
- x[-1], y[-1],
+
+ # NOTE: currently we draw in x-domain
+ # from last datum to current such that
+ # the end of line touches the "beginning"
+ # of the current datum step span.
+ x_2last , y[-2],
+ x_last, y[-1],
)
return x, y
@@ -388,13 +386,13 @@ class FlattenedOHLC(Curve):
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
+ index_field: str,
) -> None:
lasts = src_data[-2:]
- x = lasts['index']
+ x = lasts[index_field]
y = lasts['close']
# draw the "current" step graphic segment so it
@@ -418,9 +416,9 @@ class StepCurve(Curve):
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
+ index_field: str,
w: float = 0.5,
@@ -429,14 +427,13 @@ class StepCurve(Curve):
# TODO: remove this and instead place all step curve
# updating into pre-path data render callbacks.
# full input data
- x = src_data['index']
+ x = src_data[index_field]
y = src_data[array_key]
x_last = x[-1]
x_2last = x[-2]
y_last = y[-1]
step_size = x_last - x_2last
- half_step = step_size / 2
# lol, commenting this makes step curves
# all "black" for me :eyeroll:..
@@ -445,7 +442,7 @@ class StepCurve(Curve):
x_last, 0,
)
self._last_step_rect = QRectF(
- x_last - half_step, 0,
+ x_last, 0,
step_size, y_last,
)
return x, y
@@ -458,9 +455,3 @@ class StepCurve(Curve):
# p.drawLines(*tuple(filter(bool, self._last_step_lines)))
# p.drawRect(self._last_step_rect)
p.fillRect(self._last_step_rect, self._brush)
-
- # def sub_br(
- # self,
- # parent_br: QRectF | None = None,
- # ) -> QRectF:
- # return self._last_step_rect
diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py
new file mode 100644
index 00000000..07ead769
--- /dev/null
+++ b/piker/ui/_dataviz.py
@@ -0,0 +1,1083 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+'''
+Data visualization APIs
+
+'''
+from __future__ import annotations
+from typing import (
+ Optional,
+ Literal,
+ TYPE_CHECKING,
+)
+
+import msgspec
+import numpy as np
+import pyqtgraph as pg
+from PyQt5.QtCore import QLineF
+
+from ..data._sharedmem import (
+ ShmArray,
+)
+from ..data.feed import Flume
+from ..data._formatters import (
+ IncrementalFormatter,
+ OHLCBarsFmtr, # Plain OHLC renderer
+ OHLCBarsAsCurveFmtr, # OHLC converted to line
+ StepCurveFmtr, # "step" curve (like for vlm)
+)
+from ..data._pathops import (
+ slice_from_time,
+)
+from ._ohlc import (
+ BarItems,
+)
+from ._curve import (
+ Curve,
+ StepCurve,
+ FlattenedOHLC,
+)
+from ._render import Renderer
+from ..log import get_logger
+from .._profile import (
+ Profiler,
+ pg_profile_enabled,
+)
+
+
+if TYPE_CHECKING:
+ from ._interaction import ChartView
+ from ._chart import ChartPlotWidget
+ from ._display import DisplayState
+
+
+log = get_logger(__name__)
+
+
+def render_baritems(
+ viz: Viz,
+ graphics: BarItems,
+ read: tuple[
+ int, int, np.ndarray,
+ int, int, np.ndarray,
+ ],
+ profiler: Profiler,
+ **kwargs,
+
+) -> None:
+ '''
+ Graphics management logic for a ``BarItems`` object.
+
+ Mostly just logic to determine when and how to downsample an OHLC
+ lines curve into a flattened line graphic and when to display one
+ graphic or the other.
+
+ TODO: this should likely be moved into some kind of better abstraction
+ layer, if not a `Renderer` then something just above it?
+
+ '''
+ bars = graphics
+
+ self = viz # TODO: make this a ``Viz`` method?
+ r = self._src_r
+ first_render: bool = False
+
+ # if no source data renderer exists create one.
+ if not r:
+ first_render = True
+
+ # OHLC bars path renderer
+ r = self._src_r = Renderer(
+ viz=self,
+ fmtr=OHLCBarsFmtr(
+ shm=viz.shm,
+ viz=viz,
+ ),
+ )
+
+ ds_curve_r = Renderer(
+ viz=self,
+ fmtr=OHLCBarsAsCurveFmtr(
+ shm=viz.shm,
+ viz=viz,
+ ),
+ )
+
+ curve = FlattenedOHLC(
+ name=f'{viz.name}_ds_ohlc',
+ color=bars._color,
+ )
+ viz.ds_graphics = curve
+ curve.hide()
+ self.plot.addItem(curve)
+
+ # baseline "line" downsampled OHLC curve that should
+ # kick on only when we reach a certain uppx threshold.
+ self._render_table = (ds_curve_r, curve)
+
+ ds_r, curve = self._render_table
+
+ # print(
+ # f'r: {r.fmtr.xy_slice}\n'
+ # f'ds_r: {ds_r.fmtr.xy_slice}\n'
+ # )
+
+ # do checks for whether or not we require downsampling:
+ # - if we're **not** downsampling then we simply want to
+ # render the bars graphics curve and update..
+    # - if instead we are in a downsampling state then we want to
+ x_gt = 6 * (self.index_step() or 1)
+ uppx = curve.x_uppx()
+ # print(f'BARS UPPX: {uppx}')
+ in_line = should_line = curve.isVisible()
+
+ if (
+ in_line
+ and uppx < x_gt
+ ):
+ # print('FLIPPING TO BARS')
+ should_line = False
+ viz._in_ds = False
+
+ elif (
+ not in_line
+ and uppx >= x_gt
+ ):
+ # print('FLIPPING TO LINE')
+ should_line = True
+ viz._in_ds = True
+
+ profiler(f'ds logic complete line={should_line}')
+
+ # do graphics updates
+ if should_line:
+ r = ds_r
+ graphics = curve
+ profiler('updated ds curve')
+
+ else:
+ graphics = bars
+
+ if first_render:
+ bars.show()
+
+ changed_to_line = False
+ if (
+ not in_line
+ and should_line
+ ):
+ # change to line graphic
+ log.info(
+ f'downsampling to line graphic {self.name}'
+ )
+ bars.hide()
+ curve.show()
+ curve.update()
+ changed_to_line = True
+
+ elif (
+ in_line
+ and not should_line
+ ):
+ # change to bars graphic
+ log.info(
+ f'showing bars graphic {self.name}\n'
+ f'first bars render?: {first_render}'
+ )
+ curve.hide()
+ bars.show()
+ bars.update()
+
+ # XXX: is this required?
+ viz._in_ds = should_line
+
+ should_redraw = (
+ changed_to_line
+ or not should_line
+ )
+ return (
+ graphics,
+ r,
+ should_redraw,
+ should_line,
+ )
+
+
+class Viz(msgspec.Struct): # , frozen=True):
+ '''
+ (Data) "Visualization" compound type which wraps a real-time
+ shm array stream with displayed graphics (curves, charts)
+ for high level access and control as well as efficient incremental
+ update.
+
+ The intention is for this type to eventually be capable of shm-passing
+ of incrementally updated graphics stream data between actors.
+
+ '''
+ name: str
+ plot: pg.PlotItem
+ _shm: ShmArray
+ flume: Flume
+ graphics: Curve | BarItems
+
+ # for tracking y-mn/mx for y-axis auto-ranging
+ yrange: tuple[float, float] = None
+
+ # in some cases a viz may want to change its
+ # graphical "type" or, "form" when downsampling, to
+ # start this is only ever an interpolation line.
+ ds_graphics: Optional[Curve] = None
+
+ is_ohlc: bool = False
+ render: bool = True # toggle for display loop
+
+ _index_field: Literal[
+ 'index',
+ 'time',
+
+ # TODO: idea is to re-index all time series to a common
+ # longest-len-int-index where we avoid gaps and instead
+ # graph on the 0 -> N domain of the array index super set.
+ # 'gapless',
+
+ ] = 'time'
+
+ # downsampling state
+ _last_uppx: float = 0
+ _in_ds: bool = False
+ _index_step: float | None = None
+
+ # map from uppx -> (downsampled data, incremental graphics)
+ _src_r: Optional[Renderer] = None
+ _render_table: dict[
+ Optional[int],
+ tuple[Renderer, pg.GraphicsItem],
+ ] = (None, None)
+
+ # cache of y-range values per x-range input.
+ _mxmns: dict[tuple[int, int], tuple[float, float]] = {}
+
+ @property
+ def shm(self) -> ShmArray:
+ return self._shm
+
+ @property
+ def index_field(self) -> str:
+ return self._index_field
+
+ def index_step(
+ self,
+ reset: bool = False,
+
+ ) -> float:
+ if self._index_step is None:
+ index = self.shm.array[self.index_field]
+ isample = index[:16]
+ mxdiff = np.diff(isample).max()
+ self._index_step = max(mxdiff, 1)
+ if (
+ mxdiff < 1
+ or 1 < mxdiff < 60
+ ):
+ breakpoint()
+
+ return self._index_step
+
+ def maxmin(
+ self,
+ lbar: int,
+ rbar: int,
+
+ use_caching: bool = True,
+
+ ) -> Optional[tuple[float, float]]:
+ '''
+ Compute the cached max and min y-range values for a given
+ x-range determined by ``lbar`` and ``rbar`` or ``None``
+ if no range can be determined (yet).
+
+ '''
+ # TODO: hash the slice instead maybe?
+ # https://stackoverflow.com/a/29980872
+ rkey = (round(lbar), round(rbar))
+
+ do_print: bool = False
+ if use_caching:
+ cached_result = self._mxmns.get(rkey)
+ if cached_result:
+ if do_print:
+ print(
+ f'{self.name} CACHED maxmin\n'
+ f'{rkey} -> {cached_result}'
+ )
+ return cached_result
+
+ shm = self.shm
+ if shm is None:
+ return None
+
+ arr = shm.array
+
+ # get relative slice indexes into array
+ if self.index_field == 'time':
+ read_slc = slice_from_time(
+ arr,
+ start_t=lbar,
+ stop_t=rbar,
+ step=self.index_step(),
+ )
+
+ else:
+ ifirst = arr[0]['index']
+ read_slc = slice(
+ lbar - ifirst,
+ (rbar - ifirst) + 1
+ )
+
+ slice_view = arr[read_slc]
+
+ if not slice_view.size:
+ log.warning(f'{self.name} no maxmin in view?')
+ return None
+
+ elif self.yrange:
+ mxmn = self.yrange
+ if do_print:
+ print(
+ f'{self.name} M4 maxmin:\n'
+ f'{rkey} -> {mxmn}'
+ )
+
+ else:
+ if self.is_ohlc:
+ ylow = np.min(slice_view['low'])
+ yhigh = np.max(slice_view['high'])
+
+ else:
+ view = slice_view[self.name]
+ ylow = np.min(view)
+ yhigh = np.max(view)
+
+ mxmn = ylow, yhigh
+ if (
+ do_print
+ ):
+ s = 3
+ print(
+ f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n'
+ f'{rkey} -> {mxmn}\n'
+ f'read_slc: {read_slc}\n'
+ # f'abs_slc: {slice_view["index"]}\n'
+ f'first {s}:\n{slice_view[:s]}\n'
+ f'last {s}:\n{slice_view[-s:]}\n'
+ )
+
+ # cache result for input range
+ assert mxmn
+ self._mxmns[rkey] = mxmn
+
+ return mxmn
+
+ def view_range(self) -> tuple[int, int]:
+ '''
+ Return the start and stop x-indexes for the managed ``ViewBox``.
+
+ '''
+ vr = self.plot.viewRect()
+ return (
+ vr.left(),
+ vr.right(),
+ )
+
+ def bars_range(self) -> tuple[int, int, int, int]:
+ '''
+ Return a range tuple for the left-view, left-datum, right-datum
+ and right-view x-indices.
+
+ '''
+ l, start, datum_start, datum_stop, stop, r = self.datums_range()
+ return l, datum_start, datum_stop, r
+
+ def datums_range(
+ self,
+ view_range: None | tuple[float, float] = None,
+ index_field: str | None = None,
+ array: None | np.ndarray = None,
+
+ ) -> tuple[
+ int, int, int, int, int, int
+ ]:
+ '''
+ Return a range tuple for the datums present in view.
+
+ '''
+ l, r = view_range or self.view_range()
+
+ index_field: str = index_field or self.index_field
+ if index_field == 'index':
+ l, r = round(l), round(r)
+
+ if array is None:
+ array = self.shm.array
+
+ index = array[index_field]
+ first = round(index[0])
+ last = round(index[-1])
+
+ # first and last datums in view determined by
+ # l / r view range.
+ leftmost = round(l)
+ rightmost = round(r)
+
+ # invalid view state
+ if (
+ r < l
+ or l < 0
+ or r < 0
+ or (l > last and r > last)
+ ):
+ leftmost = first
+ rightmost = last
+ else:
+ rightmost = max(
+ min(last, rightmost),
+ first,
+ )
+
+ leftmost = min(
+ max(first, leftmost),
+ last,
+ rightmost - 1,
+ )
+
+ assert leftmost < rightmost
+
+ return (
+ l, # left x-in-view
+ first, # first datum
+ leftmost,
+ rightmost,
+ last, # last_datum
+ r, # right-x-in-view
+ )
+
+ def read(
+ self,
+ array_field: Optional[str] = None,
+ index_field: str | None = None,
+ profiler: None | Profiler = None,
+
+ ) -> tuple[
+ int, int, np.ndarray,
+ int, int, np.ndarray,
+ ]:
+ '''
+ Read the underlying shm array buffer and
+        return the data plus indexes for the first
+        and last datums
+        which have been written to.
+
+ '''
+ index_field: str = index_field or self.index_field
+ vr = l, r = self.view_range()
+
+ # readable data
+ array = self.shm.array
+
+ if profiler:
+ profiler('self.shm.array READ')
+
+ (
+ l,
+ ifirst,
+ lbar,
+ rbar,
+ ilast,
+ r,
+ ) = self.datums_range(
+ view_range=vr,
+ index_field=index_field,
+ array=array,
+ )
+
+ if profiler:
+ profiler('self.datums_range()')
+
+ abs_slc = slice(ifirst, ilast)
+
+ # TODO: support time slicing
+ if index_field == 'time':
+ read_slc = slice_from_time(
+ array,
+ start_t=lbar,
+ stop_t=rbar,
+ )
+
+ # TODO: maybe we should return this from the slicer call
+ # above?
+ in_view = array[read_slc]
+ if in_view.size:
+ abs_indx = in_view['index']
+ abs_slc = slice(
+ int(abs_indx[0]),
+ int(abs_indx[-1]),
+ )
+
+ if profiler:
+ profiler(
+ '`slice_from_time('
+ f'start_t={lbar}'
+ f'stop_t={rbar})'
+ )
+
+ # array-index slicing
+ # TODO: can we do time based indexing using arithmetic presuming
+ # a uniform time stamp step size?
+ else:
+ # get read-relative indices adjusting for master shm index.
+ lbar_i = max(l, ifirst) - ifirst
+ rbar_i = min(r, ilast) - ifirst
+
+ # NOTE: the slice here does NOT include the extra ``+ 1``
+ # BUT the ``in_view`` slice DOES..
+ read_slc = slice(lbar_i, rbar_i)
+ in_view = array[lbar_i: rbar_i + 1]
+ # in_view = array[lbar_i-1: rbar_i+1]
+
+ # XXX: same as ^
+ # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1]
+ if profiler:
+ profiler('index arithmetic for slicing')
+
+ if array_field:
+ array = array[array_field]
+
+ return (
+ # abs indices + full data set
+ abs_slc.start,
+ abs_slc.stop,
+ array,
+
+ # relative (read) indices + in view data
+ read_slc.start,
+ read_slc.stop,
+ in_view,
+ )
+
+ def update_graphics(
+ self,
+ render: bool = True,
+ array_key: str | None = None,
+
+ profiler: Profiler | None = None,
+ do_append: bool = True,
+
+ **kwargs,
+
+ ) -> pg.GraphicsObject:
+ '''
+        Read latest datums from shm and (incrementally)
+        render to graphics.
+
+ '''
+ profiler = Profiler(
+ msg=f'Viz.update_graphics() for {self.name}',
+ disabled=not pg_profile_enabled(),
+ ms_threshold=4,
+ # ms_threshold=ms_slower_then,
+ )
+ # shm read and slice to view
+ read = (
+ xfirst, xlast, src_array,
+ ivl, ivr, in_view,
+ ) = self.read(profiler=profiler)
+
+ profiler('read src shm data')
+
+ graphics = self.graphics
+
+ if (
+ not in_view.size
+ or not render
+ ):
+ # print('exiting early')
+ return graphics
+
+ should_redraw: bool = False
+ ds_allowed: bool = True # guard for m4 activation
+
+ # TODO: probably specialize ``Renderer`` types instead of
+ # these logic checks?
+ # - put these blocks into a `.load_renderer()` meth?
+ # - consider a OHLCRenderer, StepCurveRenderer, Renderer?
+ r = self._src_r
+ if isinstance(graphics, BarItems):
+ # XXX: special case where we change out graphics
+ # to a line after a certain uppx threshold.
+ (
+ graphics,
+ r,
+ should_redraw,
+ ds_allowed, # in line mode?
+ ) = render_baritems(
+ self,
+ graphics,
+ read,
+ profiler,
+ **kwargs,
+ )
+
+ elif not r:
+ if isinstance(graphics, StepCurve):
+
+ r = self._src_r = Renderer(
+ viz=self,
+ fmtr=StepCurveFmtr(
+ shm=self.shm,
+ viz=self,
+ ),
+ )
+
+ else:
+ r = self._src_r
+ if not r:
+ # just using for ``.diff()`` atm..
+ r = self._src_r = Renderer(
+ viz=self,
+ fmtr=IncrementalFormatter(
+ shm=self.shm,
+ viz=self,
+ ),
+ )
+
+ # ``Curve`` derivative case(s):
+ array_key = array_key or self.name
+
+ # ds update config
+ new_sample_rate: bool = False
+ should_ds: bool = r._in_ds
+ showing_src_data: bool = not r._in_ds
+
+ # downsampling incremental state checking
+ # check for and set std m4 downsample conditions
+ uppx = graphics.x_uppx()
+ uppx_diff = (uppx - self._last_uppx)
+ profiler(f'diffed uppx {uppx}')
+ if (
+ uppx > 1
+ and abs(uppx_diff) >= 1
+ and ds_allowed
+ ):
+ log.debug(
+ f'{array_key} sampler change: {self._last_uppx} -> {uppx}'
+ )
+ self._last_uppx = uppx
+
+ new_sample_rate = True
+ showing_src_data = False
+ should_ds = True
+ should_redraw = True
+
+ # "back to source" case:
+ # this more or less skips use of the m4 downsampler
+ # inside ``Renderer.render()`` which results in a path
+ # drawn verbatim to match the xy source data.
+ elif (
+ uppx <= 2
+ and self._in_ds
+ ):
+ # we should de-downsample back to our original
+ # source data so we clear our path data in prep
+ # to generate a new one from original source data.
+ new_sample_rate = True
+ should_ds = False
+ should_redraw = True
+ showing_src_data = True
+
+ # MAIN RENDER LOGIC:
+ # - determine in view data and redraw on range change
+ # - determine downsampling ops if needed
+ # - (incrementally) update ``QPainterPath``
+
+ out = r.render(
+ read,
+ array_key,
+ profiler,
+ uppx=uppx,
+
+ # TODO: better way to detect and pass this?
+ # if we want to eventually cache renderers for a given uppx
+ # we should probably use this as a key + state?
+ should_redraw=should_redraw,
+ new_sample_rate=new_sample_rate,
+ should_ds=should_ds,
+ showing_src_data=showing_src_data,
+
+ do_append=do_append,
+ )
+
+ if not out:
+ log.warning(f'{self.name} failed to render!?')
+ return graphics
+
+ path, reset_cache = out
+
+ # XXX: SUPER UGGGHHH... without this we get stale cache
+ # graphics that "smear" across the view horizontally
+ # when panning and the first datum is out of view..
+ if (
+ reset_cache
+ ):
+            # assign output paths to graphics obj but
+ # after a coords-cache reset.
+ with graphics.reset_cache():
+ graphics.path = r.path
+ graphics.fast_path = r.fast_path
+ else:
+            # assign output paths to graphics obj
+ graphics.path = r.path
+ graphics.fast_path = r.fast_path
+
+ graphics.draw_last_datum(
+ path,
+ src_array,
+ reset_cache,
+ array_key,
+ index_field=self.index_field,
+ )
+ graphics.update()
+ profiler('.update()')
+
+        # TODO: does this actually help us in any way (prolly should
+        # look at the source / ask ogi). I think it avoids artifacts on
+ # wheel-scroll downsampling curve updates?
+ # TODO: is this ever better?
+ # graphics.prepareGeometryChange()
+ # profiler('.prepareGeometryChange()')
+
+ # track downsampled state
+ self._in_ds = r._in_ds
+
+ return graphics
+
+ def draw_last(
+ self,
+ array_key: Optional[str] = None,
+ only_last_uppx: bool = False,
+
+ ) -> None:
+
+ # shm read and slice to view
+ (
+ xfirst, xlast, src_array,
+ ivl, ivr, in_view,
+ ) = self.read()
+
+ g = self.graphics
+ array_key = array_key or self.name
+ x, y = g.draw_last_datum(
+ g.path,
+ src_array,
+ False, # never reset path
+ array_key,
+ self.index_field,
+ )
+
+        # if the renderer is downsampling we choose
+ # to always try and update a single (interpolating)
+ # line segment that spans and tries to display
+ # the last uppx's worth of datums.
+ # we only care about the last pixel's
+ # worth of data since that's all the screen
+ # can represent on the last column where
+ # the most recent datum is being drawn.
+ if (
+ self._in_ds
+ or only_last_uppx
+ ):
+ dsg = self.ds_graphics or self.graphics
+
+ # XXX: pretty sure we don't need this?
+ # if isinstance(g, Curve):
+ # with dsg.reset_cache():
+ uppx = round(self._last_uppx)
+ y = y[-uppx:]
+ ymn, ymx = y.min(), y.max()
+ # print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}')
+ try:
+ iuppx = x[-uppx]
+ except IndexError:
+ # we're less then an x-px wide so just grab the start
+ # datum index.
+ iuppx = x[0]
+
+ dsg._last_line = QLineF(
+ iuppx, ymn,
+ x[-1], ymx,
+ )
+ # print(f'updating DS curve {self.name}')
+ dsg.update()
+
+ else:
+ # print(f'updating NOT DS curve {self.name}')
+ g.update()
+
+ def default_view(
+ self,
+ bars_from_y: int = int(616 * 3/8),
+ y_offset: int = 0, # in datums
+ do_ds: bool = True,
+
+ ) -> None:
+ '''
+ Set the plot's viewbox to a "default" startup setting where
+ we try to show the underlying data range sanely.
+
+ '''
+ shm: ShmArray = self.shm
+ array: np.ndarray = shm.array
+ view: ChartView = self.plot.vb
+ (
+ vl,
+ first_datum,
+ datum_start,
+ datum_stop,
+ last_datum,
+ vr,
+ ) = self.datums_range(array=array)
+
+ # invalid case: view is not ordered correctly
+ # return and expect caller to sort it out.
+ if (
+ vl > vr
+ ):
+ log.warning(
+ 'Skipping `.default_view()` viewbox not initialized..\n'
+ f'l -> r: {vl} -> {vr}\n'
+ f'datum_start -> datum_stop: {datum_start} -> {datum_stop}\n'
+ )
+ return
+
+ chartw: ChartPlotWidget = self.plot.getViewWidget()
+ index_field = self.index_field
+ step = self.index_step()
+
+ if index_field == 'time':
+ # transform l -> r view range values into
+ # data index domain to determine how view
+ # should be reset to better match data.
+ read_slc = slice_from_time(
+ array,
+ start_t=vl,
+ stop_t=vr,
+ step=step,
+ )
+ else:
+ read_slc = slice(0, datum_stop - datum_start + 1)
+
+ index_iv = array[index_field][read_slc]
+ uppx: float = self.graphics.x_uppx() or 1
+
+ # l->r distance in scene units, no larger then data span
+ data_diff = last_datum - first_datum
+ rl_diff = vr - vl
+ rescale_to_data: bool = False
+ # new_uppx: float = 1
+
+ if rl_diff > data_diff:
+ rescale_to_data = True
+ rl_diff = data_diff
+ new_uppx: float = data_diff / self.px_width()
+
+ # orient by offset from the y-axis including
+ # space to compensate for the L1 labels.
+ if not y_offset:
+ _, l1_offset = chartw.pre_l1_xs()
+
+ offset = l1_offset
+
+ if (
+ rescale_to_data
+ ):
+ offset = (offset / uppx) * new_uppx
+
+ else:
+ offset = (y_offset * step) + uppx*step
+
+ # align right side of view to the rightmost datum + the selected
+ # offset from above.
+ r_reset = (self.graphics.x_last() or last_datum) + offset
+
+ # no data is in view so check for the only 2 sane cases:
+ # - entire view is LEFT of data
+ # - entire view is RIGHT of data
+ if index_iv.size == 0:
+ log.warning(f'No data in view for {vl} -> {vr}')
+
+ # 2 cases either the view is to the left or right of the
+ # data set.
+ if (
+ vl <= first_datum
+ and vr <= first_datum
+ ):
+ l_reset = first_datum
+
+ elif (
+ vl >= last_datum
+ and vr >= last_datum
+ ):
+ l_reset = r_reset - rl_diff
+
+ else:
+ raise RuntimeError(f'Unknown view state {vl} -> {vr}')
+
+ else:
+ # maintain the l->r view distance
+ l_reset = r_reset - rl_diff
+
+        # remove any custom user yrange settings
+ if chartw._static_yrange == 'axis':
+ chartw._static_yrange = None
+
+ view.setXRange(
+ min=l_reset,
+ max=r_reset,
+ padding=0,
+ )
+
+ if do_ds:
+ view.maybe_downsample_graphics()
+ view._set_yrange()
+
+ # caller should do this!
+ # self.linked.graphics_cycle()
+
+ def incr_info(
+ self,
+ ds: DisplayState,
+ update_uppx: float = 16,
+ is_1m: bool = False,
+
+ ) -> tuple:
+
+ _, _, _, r = self.bars_range() # most recent right datum index in-view
+ lasts = self.shm.array[-1]
+ i_step = lasts['index'] # last index-specific step.
+ i_step_t = lasts['time'] # last time step.
+
+ # fqsn = self.flume.symbol.fqsn
+
+ # check if "last (is) in view" -> is a real-time update necessary?
+ if self.index_field == 'index':
+ liv = (r >= i_step)
+ else:
+ liv = (r >= i_step_t)
+
+ # compute the first available graphic obj's x-units-per-pixel
+ # TODO: make this not loop through all vizs each time!
+ uppx = self.plot.vb.x_uppx()
+
+ # NOTE: this used to be implemented in a dedicated
+ # "increment task": ``check_for_new_bars()`` but it doesn't
+ # make sense to do a whole task switch when we can just do
+ # this simple index-diff and all the fsp sub-curve graphics
+ # are diffed on each draw cycle anyway; so updates to the
+ # "curve" length is already automatic.
+ globalz = ds.globalz
+ varz = ds.hist_vars if is_1m else ds.vars
+
+ last_key = 'i_last_slow_t' if is_1m else 'i_last_t'
+ glast = globalz[last_key]
+
+ # calc datums diff since last global increment
+ i_diff_t: float = i_step_t - glast
+
+ # when the current step is now greater then the last we have
+ # read from the display state globals, we presume that the
+ # underlying source shm buffer has added a new sample and thus
+ # we should increment the global view a step (i.e. tread the
+ # view in place to keep the current datum at the same spot on
+ # screen).
+ should_tread: bool = False
+ if i_diff_t > 0:
+ globalz[last_key] = i_step_t
+ should_tread = True
+
+ # update the "last datum" (aka extending the vizs graphic with
+ # new data) only if the number of unit steps is >= the number of
+ # such unit steps per pixel (aka uppx). Iow, if the zoom level
+ # is such that a datum(s) update to graphics wouldn't span
+ # to a new pixel, we don't update yet.
+ i_last_append = varz['i_last_append']
+ append_diff = i_step - i_last_append
+
+ do_px_step = append_diff >= uppx
+ do_rt_update = (uppx < update_uppx)
+
+ if (
+ do_px_step
+ ):
+ varz['i_last_append'] = i_step
+
+ # print(
+ # f'DOING APPEND => {fqsn}\n'
+ # f'i_step: {i_step}\n'
+ # f'i_step_t: {i_step_t}\n'
+ # f'glast: {glast}\n'
+ # f'last_append: {i_last_append}\n'
+ # f'r: {r}\n'
+ # '-----------------------------\n'
+ # f'uppx: {uppx}\n'
+ # f'liv: {liv}\n'
+ # f'do_px_step: {do_px_step}\n'
+ # f'i_diff_t: {i_diff_t}\n'
+ # f'do_rt_update: {do_rt_update}\n'
+ # f'append_diff: {append_diff}\n'
+ # f'should_tread: {should_tread}\n'
+ # )
+
+ varz['i_last'] = i_step
+
+ # TODO: pack this into a struct?
+ return (
+ uppx,
+ liv,
+ do_px_step,
+ i_diff_t,
+ append_diff,
+ do_rt_update,
+ should_tread,
+ )
+
+ def px_width(self) -> float:
+ '''
+ Return the width of the view box containing
+ this graphic in pixel units.
+
+ '''
+ vb = self.plot.vb
+ if not vb:
+ return 0
+
+ vl, vr = self.view_range()
+
+ return vb.mapViewToDevice(
+ QLineF(
+ vl, 0,
+ vr, 0,
+ )
+ ).length()
diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index 475cec55..e544b64d 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -43,12 +43,14 @@ from ..data.types import Struct
from ..data._sharedmem import (
ShmArray,
)
+from ..data._sampling import _tick_groups
from ._axes import YAxisLabel
from ._chart import (
ChartPlotWidget,
LinkedSplits,
GodWidget,
)
+from ._dataviz import Viz
from ._l1 import L1Labels
from ._style import hcolor
from ._fsp import (
@@ -63,7 +65,6 @@ from ._forms import (
)
from . import _pg_overrides as pgo
# from ..data._source import tf_in_1s
-from ..data._sampling import _tick_groups
from .order_mode import (
open_order_mode,
OrderMode,
@@ -78,7 +79,7 @@ from .._profile import Profiler
log = get_logger(__name__)
-# TODO: delegate this to each `Flow.maxmin()` which includes
+# TODO: delegate this to each `Viz.maxmin()` which includes
# caching and further we should implement the following stream based
# approach, likely with ``numba``:
# https://arxiv.org/abs/cs/0610046
@@ -101,7 +102,8 @@ def chart_maxmin(
Compute max and min datums "in view" for range limits.
'''
- last_bars_range = chart.bars_range()
+ main_viz = chart.get_viz(chart.name)
+ last_bars_range = main_viz.bars_range()
out = chart.maxmin(name=fqsn)
if out is None:
@@ -113,7 +115,7 @@ def chart_maxmin(
# TODO: we need to NOT call this to avoid a manual
# np.max/min trigger and especially on the vlm_chart
- # flows which aren't shown.. like vlm?
+ # vizs which aren't shown.. like vlm?
if vlm_chart:
out = vlm_chart.maxmin()
if out:
@@ -127,10 +129,6 @@ def chart_maxmin(
)
-_i_last: int = 0
-_i_last_append: int = 0
-
-
class DisplayState(Struct):
'''
Chart-local real-time graphics state container.
@@ -141,11 +139,12 @@ class DisplayState(Struct):
maxmin: Callable
flume: Flume
- ohlcv: ShmArray
- hist_ohlcv: ShmArray
- # high level chart handles
+ # high level chart handles and underlying ``Viz``
chart: ChartPlotWidget
+ viz: Viz
+ hist_chart: ChartPlotWidget
+ hist_viz: Viz
# axis labels
l1: L1Labels
@@ -153,111 +152,82 @@ class DisplayState(Struct):
hist_last_price_sticky: YAxisLabel
# misc state tracking
- vars: dict[str, Any] = field(default_factory=lambda: {
- 'tick_margin': 0,
- 'i_last': 0,
- 'i_last_append': 0,
- 'last_mx_vlm': 0,
- 'last_mx': 0,
- 'last_mn': 0,
- })
+ vars: dict[str, Any] = field(
+ default_factory=lambda: {
+ 'tick_margin': 0,
+ 'i_last': 0,
+ 'i_last_append': 0,
+ 'last_mx_vlm': 0,
+ 'last_mx': 0,
+ 'last_mn': 0,
+ }
+ )
+ hist_vars: dict[str, Any] = field(
+ default_factory=lambda: {
+ 'tick_margin': 0,
+ 'i_last': 0,
+ 'i_last_append': 0,
+ 'last_mx_vlm': 0,
+ 'last_mx': 0,
+ 'last_mn': 0,
+ }
+ )
+
+ globalz: None | dict[str, Any] = None
vlm_chart: Optional[ChartPlotWidget] = None
vlm_sticky: Optional[YAxisLabel] = None
wap_in_history: bool = False
- def incr_info(
- self,
- chart: Optional[ChartPlotWidget] = None,
- shm: Optional[ShmArray] = None,
- state: Optional[dict] = None, # pass in a copy if you don't
- update_state: bool = True,
- update_uppx: float = 16,
- is_1m: bool = False,
+async def increment_history_view(
+ ds: DisplayState,
+):
+ hist_chart = ds.hist_chart
+ hist_viz = ds.hist_viz
+ assert 'hist' in hist_viz.shm.token['shm_name']
- ) -> tuple:
+ # TODO: seems this is more reliable at keeping the slow
+ # chart incremented in view more correctly?
+ # - It might make sense to just inline this logic with the
+ # main display task? => it's a tradeoff of slower task
+ # wakeups/ctx switches versus logic checks (as normal)
+ # - we need increment logic that only does the view shift
+ # call when the uppx permits/needs it
+ async with hist_viz.flume.index_stream(int(1)) as istream:
+ async for msg in istream:
- shm = shm or self.ohlcv
- chart = chart or self.chart
- # state = state or self.vars
-
- if (
- not update_state
- and state
- ):
- state = state.copy()
-
- # compute the first available graphic's x-units-per-pixel
- uppx = chart.view.x_uppx()
-
- # NOTE: this used to be implemented in a dedicated
- # "increment task": ``check_for_new_bars()`` but it doesn't
- # make sense to do a whole task switch when we can just do
- # this simple index-diff and all the fsp sub-curve graphics
- # are diffed on each draw cycle anyway; so updates to the
- # "curve" length is already automatic.
-
- # increment the view position by the sample offset.
- # i_step = shm.index
- i_step = shm.array[-1]['time']
- # i_diff = i_step - state['i_last']
- # state['i_last'] = i_step
- global _i_last, _i_last_append
- i_diff = i_step - _i_last
- # update global state
- if (
- # state is None
- not is_1m
- and i_diff > 0
- ):
- _i_last = i_step
-
- # append_diff = i_step - state['i_last_append']
- append_diff = i_step - _i_last_append
-
- # real-time update necessary?
- _, _, _, r = chart.bars_range()
- liv = r >= shm.index
-
- # update the "last datum" (aka extending the flow graphic with
- # new data) only if the number of unit steps is >= the number of
- # such unit steps per pixel (aka uppx). Iow, if the zoom level
- # is such that a datum(s) update to graphics wouldn't span
- # to a new pixel, we don't update yet.
- do_append = (
- append_diff >= uppx
- and i_diff
- )
- if (
- do_append
- and not is_1m
- ):
- _i_last_append = i_step
- # fqsn = self.flume.symbol.fqsn
+ # l3 = ds.viz.shm.array[-3:]
# print(
- # f'DOING APPEND => {fqsn}\n'
- # f'i_step:{i_step}\n'
- # f'i_diff:{i_diff}\n'
- # f'last:{_i_last}\n'
- # f'last_append:{_i_last_append}\n'
- # f'append_diff:{append_diff}\n'
- # f'r: {r}\n'
- # f'liv: {liv}\n'
- # f'uppx: {uppx}\n'
+ # f'fast step for {ds.flume.symbol.fqsn}:\n'
+ # f'{list(l3["time"])}\n'
+ # f'{l3}\n'
# )
+ # check if slow chart needs an x-domain shift and/or
+ # y-range resize.
+ (
+ uppx,
+ liv,
+ do_append,
+ i_diff_t,
+ append_diff,
+ do_rt_update,
+ should_tread,
- do_rt_update = uppx < update_uppx
+ ) = hist_viz.incr_info(
+ ds=ds,
+ is_1m=True,
+ )
- # TODO: pack this into a struct
- return (
- uppx,
- liv,
- do_append,
- i_diff,
- append_diff,
- do_rt_update,
- )
+ if (
+ do_append
+ and liv
+ ):
+ hist_viz.plot.vb._set_yrange()
+
+ # check if tread-in-place x-shift is needed
+ if should_tread:
+ hist_chart.increment_view(datums=append_diff)
async def graphics_update_loop(
@@ -293,7 +263,17 @@ async def graphics_update_loop(
hist_chart = godwidget.hist_linked.chart
assert hist_chart
+ # per-viz-set global last index tracking for global chart
+ # view UX incrementing; these values are singleton
+ # per-multichart-set such that automatic x-domain shifts are only
+ # done once per time step update.
+ globalz = {
+ 'i_last_t': 0, # multiview-global fast (1s) step index
+ 'i_last_slow_t': 0, # multiview-global slow (1m) step index
+ }
+
dss: dict[str, DisplayState] = {}
+
for fqsn, flume in feed.flumes.items():
ohlcv = flume.rt_shm
hist_ohlcv = flume.hist_shm
@@ -301,17 +281,26 @@ async def graphics_update_loop(
fqsn = symbol.fqsn
# update last price sticky
- fast_pi = fast_chart._flows[fqsn].plot
+ fast_viz = fast_chart._vizs[fqsn]
+ index_field = fast_viz.index_field
+ fast_pi = fast_viz.plot
last_price_sticky = fast_pi.getAxis('right')._stickies[fqsn]
last_price_sticky.update_from_data(
- *ohlcv.array[-1][['index', 'close']]
+ *ohlcv.array[-1][[
+ index_field,
+ 'close',
+ ]]
)
last_price_sticky.show()
- slow_pi = hist_chart._flows[fqsn].plot
+ hist_viz = hist_chart._vizs[fqsn]
+ slow_pi = hist_viz.plot
hist_last_price_sticky = slow_pi.getAxis('right')._stickies[fqsn]
hist_last_price_sticky.update_from_data(
- *hist_ohlcv.array[-1][['index', 'close']]
+ *hist_ohlcv.array[-1][[
+ index_field,
+ 'close',
+ ]]
)
vlm_chart = vlm_charts[fqsn]
@@ -356,105 +345,63 @@ async def graphics_update_loop(
tick_margin = 3 * tick_size
fast_chart.show()
- last_quote = time.time()
- # global _i_last
- i_last = ohlcv.index
+ last_quote_s = time.time()
dss[fqsn] = ds = linked.display_state = DisplayState(**{
'godwidget': godwidget,
'quotes': {},
'maxmin': maxmin,
+
'flume': flume,
- 'ohlcv': ohlcv,
- 'hist_ohlcv': hist_ohlcv,
+
'chart': fast_chart,
+ 'viz': fast_viz,
'last_price_sticky': last_price_sticky,
+
+ 'hist_chart': hist_chart,
+ 'hist_viz': hist_viz,
'hist_last_price_sticky': hist_last_price_sticky,
+
'l1': l1,
'vars': {
'tick_margin': tick_margin,
- 'i_last': i_last,
- 'i_last_append': i_last,
+ 'i_last': 0,
+ 'i_last_append': 0,
'last_mx_vlm': last_mx_vlm,
'last_mx': last_mx,
'last_mn': last_mn,
- }
+ },
+ 'globalz': globalz,
})
if vlm_chart:
- vlm_pi = vlm_chart._flows['volume'].plot
+ vlm_pi = vlm_chart._vizs['volume'].plot
vlm_sticky = vlm_pi.getAxis('right')._stickies['volume']
ds.vlm_chart = vlm_chart
ds.vlm_sticky = vlm_sticky
fast_chart.default_view()
- # TODO: probably factor this into some kinda `DisplayState`
- # API that can be reused at least in terms of pulling view
- # params (eg ``.bars_range()``).
- async def increment_history_view():
- i_last = hist_ohlcv.index
- state = ds.vars.copy() | {
- 'i_last_append': i_last,
- 'i_last': i_last,
- }
- _, hist_step_size_s, _ = flume.get_ds_info()
+ # ds.hist_vars.update({
+ # 'i_last_append': 0,
+ # 'i_last': 0,
+ # })
- async with flume.index_stream(
- # int(hist_step_size_s)
- # TODO: seems this is more reliable at keeping the slow
- # chart incremented in view more correctly?
- # - It might make sense to just inline this logic with the
- # main display task? => it's a tradeoff of slower task
- # wakeups/ctx switches verus logic checks (as normal)
- # - we need increment logic that only does the view shift
- # call when the uppx permits/needs it
- int(1),
- ) as istream:
- async for msg in istream:
+ nurse.start_soon(
+ increment_history_view,
+ ds,
+ )
- # check if slow chart needs an x-domain shift and/or
- # y-range resize.
- (
- uppx,
- liv,
- do_append,
- i_diff,
- append_diff,
- do_rt_update,
- ) = ds.incr_info(
- chart=hist_chart,
- shm=ds.hist_ohlcv,
- state=state,
- is_1m=True,
- # update_state=False,
- )
- # print(
- # f'liv: {liv}\n'
- # f'do_append: {do_append}\n'
- # f'append_diff: {append_diff}\n'
- # )
-
- if (
- do_append
- and liv
- ):
- # hist_chart.increment_view(steps=i_diff)
- flow = hist_chart._flows[fqsn]
- flow.plot.vb._set_yrange(
- # yrange=hist_chart.maxmin(name=fqsn)
- )
- # hist_chart.view._set_yrange(yrange=hist_chart.maxmin())
-
- nurse.start_soon(increment_history_view)
+ if ds.hist_vars['i_last'] < ds.hist_vars['i_last_append']:
+ breakpoint()
# main real-time quotes update loop
stream: tractor.MsgStream
async with feed.open_multi_stream() as stream:
assert stream
async for quotes in stream:
- quote_period = time.time() - last_quote
+ quote_period = time.time() - last_quote_s
quote_rate = round(
1/quote_period, 1) if quote_period > 0 else float('inf')
if (
@@ -467,7 +414,7 @@ async def graphics_update_loop(
):
log.warning(f'High quote rate {symbol.key}: {quote_rate}')
- last_quote = time.time()
+ last_quote_s = time.time()
for sym, quote in quotes.items():
ds = dss[sym]
@@ -513,12 +460,12 @@ def graphics_update_cycle(
chart = ds.chart
# TODO: just pass this as a direct ref to avoid so many attr accesses?
hist_chart = ds.godwidget.hist_linked.chart
- assert hist_chart
flume = ds.flume
sym = flume.symbol
fqsn = sym.fqsn
- main_flow = chart._flows[fqsn]
+ main_viz = chart._vizs[fqsn]
+ index_field = main_viz.index_field
profiler = Profiler(
msg=f'Graphics loop cycle for: `{chart.name}`',
@@ -535,54 +482,21 @@ def graphics_update_cycle(
# rt "HFT" chart
l1 = ds.l1
- # ohlcv = ds.ohlcv
ohlcv = flume.rt_shm
array = ohlcv.array
- vars = ds.vars
- tick_margin = vars['tick_margin']
+ varz = ds.vars
+ tick_margin = varz['tick_margin']
(
uppx,
liv,
do_append,
- i_diff,
+ i_diff_t,
append_diff,
do_rt_update,
- ) = ds.incr_info()
-
- # don't real-time "shift" the curve to the
- # left unless we get one of the following:
- if (
- (
- do_append
- and liv
- )
- or trigger_all
- ):
- # print(f'INCREMENTING {fqsn}')
- chart.increment_view(steps=i_diff)
- main_flow.plot.vb._set_yrange(
- # yrange=(mn, mx),
- )
-
- # NOTE: since vlm and ohlc charts are axis linked now we don't
- # need the double increment request?
- # if vlm_chart:
- # vlm_chart.increment_view(steps=i_diff)
-
- profiler('view incremented')
-
- # frames_by_type: dict[str, dict] = {}
- # lasts = {}
-
- # build tick-type "frames" of tick sequences since
- # likely the tick arrival rate is higher then our
- # (throttled) quote stream rate.
-
- # iterate in FIFO order per tick-frame
- # if sym != fqsn:
- # continue
+ should_tread,
+ ) = main_viz.incr_info(ds=ds)
# TODO: we should only run mxmn when we know
# an update is due via ``do_append`` above.
@@ -597,27 +511,10 @@ def graphics_update_cycle(
mn = mn_in_view - tick_margin
profiler('`ds.maxmin()` call')
- if (
- prepend_update_index is not None
- and lbar > prepend_update_index
- ):
- # on a history update (usually from the FSP subsys)
- # if the segment of history that is being prepended
- # isn't in view there is no reason to do a graphics
- # update.
- log.debug('Skipping prepend graphics cycle: frame not in view')
- return
-
- # TODO: eventually we want to separate out the utrade (aka
- # dark vlm prices) here and show them as an additional
- # graphic.
+ # TODO: eventually we want to separate out the dark vlm and show
+ # them as an additional graphic.
clear_types = _tick_groups['clears']
- # XXX: if we wanted to iterate in "latest" (i.e. most
- # current) tick first order as an optimization where we only
- # update from the last tick from each type class.
- # last_clear_updated: bool = False
-
# update ohlc sampled price bars
if (
do_rt_update
@@ -629,7 +526,7 @@ def graphics_update_cycle(
# chart.name,
# do_append=do_append,
)
- main_flow.draw_last(array_key=fqsn)
+ main_viz.draw_last(array_key=fqsn)
hist_chart.update_graphics_from_flow(
fqsn,
@@ -637,10 +534,25 @@ def graphics_update_cycle(
# do_append=do_append,
)
- # NOTE: we always update the "last" datum
- # since the current range should at least be updated
- # to it's max/min on the last pixel.
- typs: set[str] = set()
+ # don't real-time "shift" the curve to the
+ # left unless we get one of the following:
+ if (
+ (
+ should_tread
+ and do_append
+ and liv
+ )
+ or trigger_all
+ ):
+ chart.increment_view(datums=append_diff)
+ main_viz.plot.vb._set_yrange()
+
+ # NOTE: since vlm and ohlc charts are axis linked now we don't
+ # need the double increment request?
+ # if vlm_chart:
+ # vlm_chart.increment_view(datums=append_diff)
+
+ profiler('view incremented')
# from pprint import pformat
# frame_counts = {
@@ -665,11 +577,6 @@ def graphics_update_cycle(
price = tick.get('price')
size = tick.get('size')
- if typ in typs:
- continue
-
- typs.add(typ)
-
# compute max and min prices (including bid/ask) from
# tick frames to determine the y-range for chart
# auto-scaling.
@@ -679,7 +586,6 @@ def graphics_update_cycle(
mn = min(price - tick_margin, mn)
if typ in clear_types:
-
# XXX: if we only wanted to update graphics from the
# "current"/"latest received" clearing price tick
# once (see alt iteration order above).
@@ -692,7 +598,10 @@ def graphics_update_cycle(
# set.
# update price sticky(s)
- end_ic = array[-1][['index', 'close']]
+ end_ic = array[-1][[
+ index_field,
+ 'close',
+ ]]
ds.last_price_sticky.update_from_data(*end_ic)
ds.hist_last_price_sticky.update_from_data(*end_ic)
@@ -740,7 +649,7 @@ def graphics_update_cycle(
l1.bid_label.update_fields({'level': price, 'size': size})
# check for y-range re-size
- if (mx > vars['last_mx']) or (mn < vars['last_mn']):
+ if (mx > varz['last_mx']) or (mn < varz['last_mn']):
# fast chart resize case
if (
@@ -748,7 +657,7 @@ def graphics_update_cycle(
and not chart._static_yrange == 'axis'
):
# main_vb = chart.view
- main_vb = chart._flows[fqsn].plot.vb
+ main_vb = chart._vizs[fqsn].plot.vb
if (
main_vb._ic is None
or not main_vb._ic.is_set()
@@ -765,6 +674,8 @@ def graphics_update_cycle(
)
# check if slow chart needs a resize
+
+ hist_viz = hist_chart._vizs[fqsn]
(
_,
hist_liv,
@@ -772,33 +683,29 @@ def graphics_update_cycle(
_,
_,
_,
- ) = ds.incr_info(
- chart=hist_chart,
- shm=ds.hist_ohlcv,
- update_state=False,
+ _,
+ ) = hist_viz.incr_info(
+ ds=ds,
is_1m=True,
)
if hist_liv:
- flow = hist_chart._flows[fqsn]
- flow.plot.vb._set_yrange(
- # yrange=hist_chart.maxmin(name=fqsn),
- )
+ hist_viz.plot.vb._set_yrange()
# XXX: update this every draw cycle to make L1-always-in-view work.
- vars['last_mx'], vars['last_mn'] = mx, mn
+ varz['last_mx'], varz['last_mn'] = mx, mn
- # run synchronous update on all linked flows
- # TODO: should the "main" (aka source) flow be special?
- for curve_name, flow in chart._flows.items():
+ # run synchronous update on all linked viz
+ # TODO: should the "main" (aka source) viz be special?
+ for curve_name, viz in chart._vizs.items():
# update any overlayed fsp flows
if (
# curve_name != chart.data_key
curve_name != fqsn
- and not flow.is_ohlc
+ and not viz.is_ohlc
):
update_fsp_chart(
chart,
- flow,
+ viz,
curve_name,
array_key=curve_name,
)
@@ -812,7 +719,7 @@ def graphics_update_cycle(
# and not do_append
# and not do_rt_update
):
- flow.draw_last(
+ viz.draw_last(
array_key=curve_name,
only_last_uppx=True,
)
@@ -821,11 +728,14 @@ def graphics_update_cycle(
# TODO: can we unify this with the above loop?
if vlm_chart:
# print(f"DOING VLM {fqsn}")
- vlm_flows = vlm_chart._flows
+ vlm_vizs = vlm_chart._vizs
# always update y-label
ds.vlm_sticky.update_from_data(
- *array[-1][['index', 'volume']]
+ *array[-1][[
+ index_field,
+ 'volume',
+ ]]
)
if (
@@ -855,7 +765,7 @@ def graphics_update_cycle(
profiler('`vlm_chart.update_graphics_from_flow()`')
if (
- mx_vlm_in_view != vars['last_mx_vlm']
+ mx_vlm_in_view != varz['last_mx_vlm']
):
yrange = (0, mx_vlm_in_view * 1.375)
vlm_chart.view._set_yrange(
@@ -863,24 +773,24 @@ def graphics_update_cycle(
)
profiler('`vlm_chart.view._set_yrange()`')
# print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}')
- vars['last_mx_vlm'] = mx_vlm_in_view
+ varz['last_mx_vlm'] = mx_vlm_in_view
# update all downstream FSPs
- for curve_name, flow in vlm_flows.items():
+ for curve_name, viz in vlm_vizs.items():
if (
curve_name not in {'volume', fqsn}
- and flow.render
+ and viz.render
and (
liv and do_rt_update
or do_append
)
- # and not flow.is_ohlc
+ # and not viz.is_ohlc
# and curve_name != fqsn
):
update_fsp_chart(
vlm_chart,
- flow,
+ viz,
curve_name,
array_key=curve_name,
# do_append=uppx < update_uppx,
@@ -889,7 +799,7 @@ def graphics_update_cycle(
# is this even doing anything?
# (pretty sure it's the real-time
# resizing from last quote?)
- fvb = flow.plot.vb
+ fvb = viz.plot.vb
fvb._set_yrange(
name=curve_name,
)
@@ -905,9 +815,9 @@ def graphics_update_cycle(
# range of that set.
):
# always update the last datum-element
- # graphic for all flows
- # print(f'drawing last {flow.name}')
- flow.draw_last(array_key=curve_name)
+ # graphic for all vizs
+ # print(f'drawing last {viz.name}')
+ viz.draw_last(array_key=curve_name)
async def link_views_with_region(
@@ -937,92 +847,124 @@ async def link_views_with_region(
hist_pi.addItem(region, ignoreBounds=True)
region.setOpacity(6/16)
- flow = rt_chart._flows[flume.symbol.fqsn]
- assert flow
+ viz = rt_chart.get_viz(flume.symbol.fqsn)
+ assert viz
+ index_field = viz.index_field
# XXX: no idea why this doesn't work but it's causing
# a weird placement of the region on the way-far-left..
- # region.setClipItem(flow.graphics)
+ # region.setClipItem(viz.graphics)
+
+ if index_field == 'time':
+
+ # in the (epoch) index case we can map directly
+ # from the fast chart's x-domain values since they are
+ # on the same index as the slow chart.
+
+ def update_region_from_pi(
+ window,
+ viewRange: tuple[tuple, tuple],
+ is_manual: bool = True,
+ ) -> None:
+ # put linear region "in front" in layer terms
+ region.setZValue(10)
+
+ # set the region on the history chart
+ # to the range currently viewed in the
+ # HFT/real-time chart.
+ rng = mn, mx = viewRange[0]
+
+ # hist_viz = hist_chart.get_viz(flume.symbol.fqsn)
+ # hist = hist_viz.shm.array[-3:]
+ # print(
+ # f'mn: {mn}\n'
+ # f'mx: {mx}\n'
+ # f'slow last 3 epochs: {list(hist["time"])}\n'
+ # f'slow last 3: {hist}\n'
+ # )
+
+ region.setRegion(rng)
- # poll for datums load and timestep detection
- for _ in range(100):
- try:
- _, _, ratio = flume.get_ds_info()
- break
- except IndexError:
- await trio.sleep(0.01)
- continue
else:
- raise RuntimeError(
- 'Failed to detect sampling periods from shm!?')
+ # poll for datums load and timestep detection
+ for _ in range(100):
+ try:
+ _, _, ratio = flume.get_ds_info()
+ break
+ except IndexError:
+ await trio.sleep(0.01)
+ continue
+ else:
+ raise RuntimeError(
+ 'Failed to detect sampling periods from shm!?')
- # sampling rate transform math:
- # -----------------------------
- # define the fast chart to slow chart as a linear mapping
- # over the fast index domain `i` to the slow index domain
- # `j` as:
- #
- # j = i - i_offset
- # ------------ + j_offset
- # j/i
- #
- # conversely the inverse function is:
- #
- # i = j/i * (j - j_offset) + i_offset
- #
- # Where `j_offset` is our ``izero_hist`` and `i_offset` is our
- # `izero_rt`, the ``ShmArray`` offsets which correspond to the
- # indexes in each array where the "current" time is indexed at init.
- # AKA the index where new data is "appended to" and historical data
- # if "prepended from".
- #
- # more practically (and by default) `i` is normally an index
- # into 1s samples and `j` is an index into 60s samples (aka 1m).
- # in the below handlers ``ratio`` is the `j/i` and ``mn``/``mx``
- # are the low and high index input from the source index domain.
+ # sampling rate transform math:
+ # -----------------------------
+ # define the fast chart to slow chart as a linear mapping
+ # over the fast index domain `i` to the slow index domain
+ # `j` as:
+ #
+ # j = i - i_offset
+ # ------------ + j_offset
+ # j/i
+ #
+ # conversely the inverse function is:
+ #
+ # i = j/i * (j - j_offset) + i_offset
+ #
+ # Where `j_offset` is our ``izero_hist`` and `i_offset` is our
+ # `izero_rt`, the ``ShmArray`` offsets which correspond to the
+ # indexes in each array where the "current" time is indexed at init.
+ # AKA the index where new data is "appended to" and historical data
+ # AKA the index where new data is "appended to" and historical data
+ #
+ # more practically (and by default) `i` is normally an index
+ # into 1s samples and `j` is an index into 60s samples (aka 1m).
+ # in the below handlers ``ratio`` is the `j/i` and ``mn``/``mx``
+ # are the low and high index input from the source index domain.
- def update_region_from_pi(
- window,
- viewRange: tuple[tuple, tuple],
- is_manual: bool = True,
+ def update_region_from_pi(
+ window,
+ viewRange: tuple[tuple, tuple],
+ is_manual: bool = True,
- ) -> None:
- # put linear region "in front" in layer terms
- region.setZValue(10)
+ ) -> None:
+ # put linear region "in front" in layer terms
+ region.setZValue(10)
- # set the region on the history chart
- # to the range currently viewed in the
- # HFT/real-time chart.
- mn, mx = viewRange[0]
- ds_mn = (mn - izero_rt)/ratio
- ds_mx = (mx - izero_rt)/ratio
- lhmn = ds_mn + izero_hist
- lhmx = ds_mx + izero_hist
- # print(
- # f'rt_view_range: {(mn, mx)}\n'
- # f'ds_mn, ds_mx: {(ds_mn, ds_mx)}\n'
- # f'lhmn, lhmx: {(lhmn, lhmx)}\n'
- # )
- region.setRegion((
- lhmn,
- lhmx,
- ))
+ # set the region on the history chart
+ # to the range currently viewed in the
+ # HFT/real-time chart.
+ mn, mx = viewRange[0]
+ ds_mn = (mn - izero_rt)/ratio
+ ds_mx = (mx - izero_rt)/ratio
+ lhmn = ds_mn + izero_hist
+ lhmx = ds_mx + izero_hist
+ # print(
+ # f'rt_view_range: {(mn, mx)}\n'
+ # f'ds_mn, ds_mx: {(ds_mn, ds_mx)}\n'
+ # f'lhmn, lhmx: {(lhmn, lhmx)}\n'
+ # )
+ region.setRegion((
+ lhmn,
+ lhmx,
+ ))
- # TODO: if we want to have the slow chart adjust range to
- # match the fast chart's selection -> results in the
- # linear region expansion never can go "outside of view".
- # hmn, hmx = hvr = hist_chart.view.state['viewRange'][0]
- # print((hmn, hmx))
- # if (
- # hvr
- # and (lhmn < hmn or lhmx > hmx)
- # ):
- # hist_pi.setXRange(
- # lhmn,
- # lhmx,
- # padding=0,
- # )
- # hist_linked.graphics_cycle()
+ # TODO: if we want to have the slow chart adjust range to
+ # match the fast chart's selection -> results in the
+ # linear region expansion never can go "outside of view".
+ # hmn, hmx = hvr = hist_chart.view.state['viewRange'][0]
+ # print((hmn, hmx))
+ # if (
+ # hvr
+ # and (lhmn < hmn or lhmx > hmx)
+ # ):
+ # hist_pi.setXRange(
+ # lhmn,
+ # lhmx,
+ # padding=0,
+ # )
+ # hist_linked.graphics_cycle()
# connect region to be updated on plotitem interaction.
rt_pi.sigRangeChanged.connect(update_region_from_pi)
@@ -1052,11 +994,11 @@ def multi_maxmin(
) -> tuple[float, float]:
'''
- Flows "group" maxmin loop; assumes all named flows
+ Viz "group" maxmin loop; assumes all named vizs
are in the same co-domain and thus can be sorted
as one set.
- Iterates all the named flows and calls the chart
+ Iterates all the named vizs and calls the chart
api to find their range values and return.
TODO: really we should probably have a more built-in API
@@ -1122,7 +1064,7 @@ async def display_symbol_data(
# avoiding needless Qt-in-guest-mode context switches
tick_throttle=min(
round(_quote_throttle_rate/len(fqsns)),
- 22,
+ 22, # aka 6 + 16
),
) as feed:
@@ -1163,10 +1105,11 @@ async def display_symbol_data(
# - gradient in "lightness" based on liquidity, or lifetime in derivs?
palette = itertools.cycle([
# curve color, last bar curve color
- ['i3', 'gray'],
- ['grayer', 'bracket'],
['grayest', 'i3'],
['default_dark', 'default'],
+
+ ['grayer', 'bracket'],
+ ['i3', 'gray'],
])
pis: dict[str, list[pgo.PlotItem, pgo.PlotItem]] = {}
@@ -1176,6 +1119,12 @@ async def display_symbol_data(
tuple[str, Flume]
] = list(feed.flumes.items())
+ # use array int-indexing when no aggregate feed overlays are
+ # loaded.
+ if len(fitems) == 1:
+ from ._dataviz import Viz
+ Viz._index_field = 'index'
+
# for the "first"/selected symbol we create new chart widgets
# and sub-charts for FSPs
fqsn, flume = fitems[0]
@@ -1199,6 +1148,13 @@ async def display_symbol_data(
# sidepane=False,
sidepane=godwidget.search,
)
+
+ # ensure the last datum graphic is generated
+ # for zoom-interaction purposes.
+ hist_chart.get_viz(fqsn).draw_last(
+ array_key=fqsn,
+ # only_last_uppx=True,
+ )
pis.setdefault(fqsn, [None, None])[1] = hist_chart.plotItem
# don't show when not focussed
@@ -1279,7 +1235,7 @@ async def display_symbol_data(
hist_pi.hideAxis('left')
hist_pi.hideAxis('bottom')
- flow = hist_chart.draw_curve(
+ viz = hist_chart.draw_curve(
fqsn,
hist_ohlcv,
flume,
@@ -1292,6 +1248,13 @@ async def display_symbol_data(
last_bar_color=bg_last_bar_color,
)
+ # ensure the last datum graphic is generated
+ # for zoom-interaction purposes.
+ viz.draw_last(
+ array_key=fqsn,
+ # only_last_uppx=True,
+ )
+
hist_pi.vb.maxmin = partial(
hist_chart.maxmin,
name=fqsn,
@@ -1300,8 +1263,8 @@ async def display_symbol_data(
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
# ``.draw_curve()``.
- flow = hist_chart._flows[fqsn]
- assert flow.plot is hist_pi
+ viz = hist_chart._vizs[fqsn]
+ assert viz.plot is hist_pi
pis.setdefault(fqsn, [None, None])[1] = hist_pi
rt_pi = rt_chart.overlay_plotitem(
@@ -1312,7 +1275,7 @@ async def display_symbol_data(
rt_pi.hideAxis('left')
rt_pi.hideAxis('bottom')
- flow = rt_chart.draw_curve(
+ viz = rt_chart.draw_curve(
fqsn,
ohlcv,
flume,
@@ -1333,8 +1296,8 @@ async def display_symbol_data(
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
# ``.draw_curve()``.
- flow = rt_chart._flows[fqsn]
- assert flow.plot is rt_pi
+ viz = rt_chart._vizs[fqsn]
+ assert viz.plot is rt_pi
pis.setdefault(fqsn, [None, None])[0] = rt_pi
rt_chart.setFocus()
@@ -1372,8 +1335,7 @@ async def display_symbol_data(
# trigger another view reset if no sub-chart
hist_chart.default_view()
rt_chart.default_view()
-
- # let Qt run to render all widgets and make sure the
+ # let Qt run to render all widgets and make sure the
# sidepanes line up vertically.
await trio.sleep(0)
@@ -1421,9 +1383,6 @@ async def display_symbol_data(
vlm_charts,
)
- rt_chart.default_view()
- await trio.sleep(0)
-
mode: OrderMode
async with (
open_order_mode(
@@ -1436,5 +1395,8 @@ async def display_symbol_data(
rt_linked.mode = mode
rt_chart.default_view()
+ rt_chart.view.enable_auto_yrange()
hist_chart.default_view()
+ hist_chart.view.enable_auto_yrange()
+
await trio.sleep_forever() # let the app run.. bby
diff --git a/piker/ui/_editors.py b/piker/ui/_editors.py
index 3703558a..08f19852 100644
--- a/piker/ui/_editors.py
+++ b/piker/ui/_editors.py
@@ -377,7 +377,7 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
nbars = ixmx - ixmn + 1
chart = self._chart
- data = chart._flows[chart.name].shm.array[ixmn:ixmx]
+ data = chart.get_viz(chart.name).shm.array[ixmn:ixmx]
if len(data):
std = data['close'].std()
diff --git a/piker/ui/_flows.py b/piker/ui/_flows.py
deleted file mode 100644
index 2e04bb37..00000000
--- a/piker/ui/_flows.py
+++ /dev/null
@@ -1,974 +0,0 @@
-# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for pikers)
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-'''
-High level streaming graphics primitives.
-
-This is an intermediate layer which associates real-time low latency
-graphics primitives with underlying FSP related data structures for fast
-incremental update.
-
-'''
-from __future__ import annotations
-from typing import (
- Optional,
-)
-
-import msgspec
-import numpy as np
-import pyqtgraph as pg
-from PyQt5.QtGui import QPainterPath
-from PyQt5.QtCore import QLineF
-
-from ..data._sharedmem import (
- ShmArray,
-)
-from ..data.feed import Flume
-from .._profile import (
- pg_profile_enabled,
- # ms_slower_then,
-)
-from ._pathops import (
- IncrementalFormatter,
- OHLCBarsFmtr, # Plain OHLC renderer
- OHLCBarsAsCurveFmtr, # OHLC converted to line
- StepCurveFmtr, # "step" curve (like for vlm)
- xy_downsample,
-)
-from ._ohlc import (
- BarItems,
- # bar_from_ohlc_row,
-)
-from ._curve import (
- Curve,
- StepCurve,
- FlattenedOHLC,
-)
-from ..log import get_logger
-from .._profile import Profiler
-
-
-log = get_logger(__name__)
-
-
-def render_baritems(
- flow: Flow,
- graphics: BarItems,
- read: tuple[
- int, int, np.ndarray,
- int, int, np.ndarray,
- ],
- profiler: Profiler,
- **kwargs,
-
-) -> None:
- '''
- Graphics management logic for a ``BarItems`` object.
-
- Mostly just logic to determine when and how to downsample an OHLC
- lines curve into a flattened line graphic and when to display one
- graphic or the other.
-
- TODO: this should likely be moved into some kind of better abstraction
- layer, if not a `Renderer` then something just above it?
-
- '''
- bars = graphics
-
- # if no source data renderer exists create one.
- self = flow
- show_bars: bool = False
-
- r = self._src_r
- if not r:
- show_bars = True
-
- # OHLC bars path renderer
- r = self._src_r = Renderer(
- flow=self,
- fmtr=OHLCBarsFmtr(
- shm=flow.shm,
- flow=flow,
- _last_read=read,
- ),
- )
-
- ds_curve_r = Renderer(
- flow=self,
- fmtr=OHLCBarsAsCurveFmtr(
- shm=flow.shm,
- flow=flow,
- _last_read=read,
- ),
- )
-
- curve = FlattenedOHLC(
- name=f'{flow.name}_ds_ohlc',
- color=bars._color,
- )
- flow.ds_graphics = curve
- curve.hide()
- self.plot.addItem(curve)
-
- # baseline "line" downsampled OHLC curve that should
- # kick on only when we reach a certain uppx threshold.
- self._render_table = (ds_curve_r, curve)
-
- ds_r, curve = self._render_table
-
- # do checks for whether or not we require downsampling:
- # - if we're **not** downsampling then we simply want to
- # render the bars graphics curve and update..
- # - if instead we are in a downsamplig state then we to
- x_gt = 6
- uppx = curve.x_uppx()
- in_line = should_line = curve.isVisible()
- if (
- in_line
- and uppx < x_gt
- ):
- # print('FLIPPING TO BARS')
- should_line = False
- flow._in_ds = False
-
- elif (
- not in_line
- and uppx >= x_gt
- ):
- # print('FLIPPING TO LINE')
- should_line = True
- flow._in_ds = True
-
- profiler(f'ds logic complete line={should_line}')
-
- # do graphics updates
- if should_line:
- r = ds_r
- graphics = curve
- profiler('updated ds curve')
-
- else:
- graphics = bars
-
- if show_bars:
- bars.show()
-
- changed_to_line = False
- if (
- not in_line
- and should_line
- ):
- # change to line graphic
- log.info(
- f'downsampling to line graphic {self.name}'
- )
- bars.hide()
- curve.show()
- curve.update()
- changed_to_line = True
-
- elif in_line and not should_line:
- # change to bars graphic
- log.info(f'showing bars graphic {self.name}')
- curve.hide()
- bars.show()
- bars.update()
-
- return (
- graphics,
- r,
- {'read_from_key': False},
- should_line,
- changed_to_line,
- )
-
-
-class Flow(msgspec.Struct): # , frozen=True):
- '''
- (Financial Signal-)Flow compound type which wraps a real-time
- shm array stream with displayed graphics (curves, charts)
- for high level access and control as well as efficient incremental
- update.
-
- The intention is for this type to eventually be capable of shm-passing
- of incrementally updated graphics stream data between actors.
-
- '''
- name: str
- plot: pg.PlotItem
- _shm: ShmArray
- flume: Flume
- graphics: Curve | BarItems
-
- # for tracking y-mn/mx for y-axis auto-ranging
- yrange: tuple[float, float] = None
-
- # in some cases a flow may want to change its
- # graphical "type" or, "form" when downsampling, to
- # start this is only ever an interpolation line.
- ds_graphics: Optional[Curve] = None
-
- is_ohlc: bool = False
- render: bool = True # toggle for display loop
-
- # downsampling state
- _last_uppx: float = 0
- _in_ds: bool = False
-
- # map from uppx -> (downsampled data, incremental graphics)
- _src_r: Optional[Renderer] = None
- _render_table: dict[
- Optional[int],
- tuple[Renderer, pg.GraphicsItem],
- ] = (None, None)
-
- # TODO: hackery to be able to set a shm later
- # but whilst also allowing this type to hashable,
- # likely will require serializable token that is used to attach
- # to the underlying shm ref after startup?
- # _shm: Optional[ShmArray] = None # currently, may be filled in "later"
-
- # last read from shm (usually due to an update call)
- _last_read: Optional[np.ndarray] = None
-
- # cache of y-range values per x-range input.
- _mxmns: dict[tuple[int, int], tuple[float, float]] = {}
-
- @property
- def shm(self) -> ShmArray:
- return self._shm
-
- # TODO: remove this and only allow setting through
- # private ``._shm`` attr?
- # @shm.setter
- # def shm(self, shm: ShmArray) -> ShmArray:
- # self._shm = shm
-
- def maxmin(
- self,
- lbar: int,
- rbar: int,
-
- ) -> Optional[tuple[float, float]]:
- '''
- Compute the cached max and min y-range values for a given
- x-range determined by ``lbar`` and ``rbar`` or ``None``
- if no range can be determined (yet).
-
- '''
- rkey = (lbar, rbar)
- cached_result = self._mxmns.get(rkey)
- if cached_result:
- return cached_result
-
- shm = self.shm
- if shm is None:
- return None
-
- arr = shm.array
-
- # build relative indexes into shm array
- # TODO: should we just add/use a method
- # on the shm to do this?
- ifirst = arr[0]['index']
- slice_view = arr[
- lbar - ifirst:
- (rbar - ifirst) + 1
- ]
-
- if not slice_view.size:
- return None
-
- elif self.yrange:
- mxmn = self.yrange
- # print(f'{self.name} M4 maxmin: {mxmn}')
-
- else:
- if self.is_ohlc:
- ylow = np.min(slice_view['low'])
- yhigh = np.max(slice_view['high'])
-
- else:
- view = slice_view[self.name]
- ylow = np.min(view)
- yhigh = np.max(view)
-
- mxmn = ylow, yhigh
- # print(f'{self.name} MANUAL maxmin: {mxmin}')
-
- # cache result for input range
- assert mxmn
- self._mxmns[rkey] = mxmn
-
- return mxmn
-
- def view_range(self) -> tuple[int, int]:
- '''
- Return the indexes in view for the associated
- plot displaying this flow's data.
-
- '''
- vr = self.plot.viewRect()
- return (
- vr.left(),
- vr.right(),
- )
-
- def datums_range(
- self,
- index_field: str = 'index',
- ) -> tuple[
- int, int, int, int, int, int
- ]:
- '''
- Return a range tuple for the datums present in view.
-
- '''
- l, r = self.view_range()
- l = round(l)
- r = round(r)
-
- # TODO: avoid this and have shm passed
- # in earlier.
- if self.shm is None:
- # haven't initialized the flow yet
- return (0, l, 0, 0, r, 0)
-
- array = self.shm.array
- index = array['index']
- start = index[0]
- end = index[-1]
- lbar = max(l, start)
- rbar = min(r, end)
- return (
- start, l, lbar, rbar, r, end,
- )
-
- def read(
- self,
- array_field: Optional[str] = None,
- index_field: str = 'index',
-
- ) -> tuple[
- int, int, np.ndarray,
- int, int, np.ndarray,
- ]:
- '''
- Read the underlying shm array buffer and
- return the data plus indexes for the first
- and last
- which has been written to.
-
- '''
- # readable data
- array = self.shm.array
-
- indexes = array[index_field]
- ifirst = indexes[0]
- ilast = indexes[-1]
-
- ifirst, l, lbar, rbar, r, ilast = self.datums_range()
-
- # get read-relative indices adjusting
- # for master shm index.
- lbar_i = max(l, ifirst) - ifirst
- rbar_i = min(r, ilast) - ifirst
-
- if array_field:
- array = array[array_field]
-
- # TODO: we could do it this way as well no?
- # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1]
- in_view = array[lbar_i: rbar_i + 1]
-
- return (
- # abs indices + full data set
- ifirst, ilast, array,
-
- # relative indices + in view datums
- lbar_i, rbar_i, in_view,
- )
-
- def update_graphics(
- self,
- use_vr: bool = True,
- render: bool = True,
- array_key: Optional[str] = None,
-
- profiler: Optional[Profiler] = None,
- do_append: bool = True,
-
- **kwargs,
-
- ) -> pg.GraphicsObject:
- '''
- Read latest datums from shm and render to (incrementally)
- render to graphics.
-
- '''
- profiler = Profiler(
- msg=f'Flow.update_graphics() for {self.name}',
- disabled=not pg_profile_enabled(),
- ms_threshold=4,
- # ms_threshold=ms_slower_then,
- )
- # shm read and slice to view
- read = (
- xfirst, xlast, src_array,
- ivl, ivr, in_view,
- ) = self.read()
-
- profiler('read src shm data')
-
- graphics = self.graphics
-
- if (
- not in_view.size
- or not render
- ):
- # print('exiting early')
- return graphics
-
- slice_to_head: int = -1
- should_redraw: bool = False
- should_line: bool = False
- rkwargs = {}
-
- # TODO: probably specialize ``Renderer`` types instead of
- # these logic checks?
- # - put these blocks into a `.load_renderer()` meth?
- # - consider a OHLCRenderer, StepCurveRenderer, Renderer?
- r = self._src_r
- if isinstance(graphics, BarItems):
- # XXX: special case where we change out graphics
- # to a line after a certain uppx threshold.
- (
- graphics,
- r,
- rkwargs,
- should_line,
- changed_to_line,
- ) = render_baritems(
- self,
- graphics,
- read,
- profiler,
- **kwargs,
- )
- should_redraw = changed_to_line or not should_line
- self._in_ds = should_line
-
- elif not r:
- if isinstance(graphics, StepCurve):
-
- r = self._src_r = Renderer(
- flow=self,
- fmtr=StepCurveFmtr(
- shm=self.shm,
- flow=self,
- _last_read=read,
- ),
- )
-
- # TODO: append logic inside ``.render()`` isn't
- # correct yet for step curves.. remove this to see it.
- should_redraw = True
- slice_to_head = -2
-
- else:
- r = self._src_r
- if not r:
- # just using for ``.diff()`` atm..
- r = self._src_r = Renderer(
- flow=self,
- fmtr=IncrementalFormatter(
- shm=self.shm,
- flow=self,
- _last_read=read,
- ),
- )
-
- # ``Curve`` derivative case(s):
- array_key = array_key or self.name
- # print(array_key)
-
- # ds update config
- new_sample_rate: bool = False
- should_ds: bool = r._in_ds
- showing_src_data: bool = not r._in_ds
-
- # downsampling incremental state checking
- # check for and set std m4 downsample conditions
- uppx = graphics.x_uppx()
- uppx_diff = (uppx - self._last_uppx)
- profiler(f'diffed uppx {uppx}')
- if (
- uppx > 1
- and abs(uppx_diff) >= 1
- ):
- log.debug(
- f'{array_key} sampler change: {self._last_uppx} -> {uppx}'
- )
- self._last_uppx = uppx
-
- new_sample_rate = True
- showing_src_data = False
- should_ds = True
- should_redraw = True
-
- elif (
- uppx <= 2
- and self._in_ds
- ):
- # we should de-downsample back to our original
- # source data so we clear our path data in prep
- # to generate a new one from original source data.
- new_sample_rate = True
- should_ds = False
- should_redraw = True
-
- showing_src_data = True
- # reset yrange to be computed from source data
- self.yrange = None
-
- # MAIN RENDER LOGIC:
- # - determine in view data and redraw on range change
- # - determine downsampling ops if needed
- # - (incrementally) update ``QPainterPath``
-
- out = r.render(
- read,
- array_key,
- profiler,
- uppx=uppx,
- # use_vr=True,
-
- # TODO: better way to detect and pass this?
- # if we want to eventually cache renderers for a given uppx
- # we should probably use this as a key + state?
- should_redraw=should_redraw,
- new_sample_rate=new_sample_rate,
- should_ds=should_ds,
- showing_src_data=showing_src_data,
-
- slice_to_head=slice_to_head,
- do_append=do_append,
-
- **rkwargs,
- )
- if showing_src_data:
- # print(f"{self.name} SHOWING SOURCE")
- # reset yrange to be computed from source data
- self.yrange = None
-
- if not out:
- log.warning(f'{self.name} failed to render!?')
- return graphics
-
- path, data, reset = out
-
- # if self.yrange:
- # print(f'flow {self.name} yrange from m4: {self.yrange}')
-
- # XXX: SUPER UGGGHHH... without this we get stale cache
- # graphics that don't update until you downsampler again..
- # reset = False
- # if reset:
- # with graphics.reset_cache():
- # # assign output paths to graphicis obj
- # graphics.path = r.path
- # graphics.fast_path = r.fast_path
-
- # # XXX: we don't need this right?
- # # graphics.draw_last_datum(
- # # path,
- # # src_array,
- # # data,
- # # reset,
- # # array_key,
- # # )
- # # graphics.update()
- # # profiler('.update()')
- # else:
- # assign output paths to graphicis obj
- graphics.path = r.path
- graphics.fast_path = r.fast_path
-
- graphics.draw_last_datum(
- path,
- src_array,
- data,
- reset,
- array_key,
- )
- graphics.update()
- profiler('.update()')
-
- # TODO: does this actuallly help us in any way (prolly should
- # look at the source / ask ogi). I think it avoid artifacts on
- # wheel-scroll downsampling curve updates?
- # TODO: is this ever better?
- # graphics.prepareGeometryChange()
- # profiler('.prepareGeometryChange()')
-
- # track downsampled state
- self._in_ds = r._in_ds
-
- return graphics
-
- def draw_last(
- self,
- array_key: Optional[str] = None,
- only_last_uppx: bool = False,
-
- ) -> None:
-
- # shm read and slice to view
- (
- xfirst, xlast, src_array,
- ivl, ivr, in_view,
- ) = self.read()
-
- g = self.graphics
- array_key = array_key or self.name
- x, y = g.draw_last_datum(
- g.path,
- src_array,
- src_array,
- False, # never reset path
- array_key,
- )
-
- # the renderer is downsampling we choose
- # to always try and updadte a single (interpolating)
- # line segment that spans and tries to display
- # the las uppx's worth of datums.
- # we only care about the last pixel's
- # worth of data since that's all the screen
- # can represent on the last column where
- # the most recent datum is being drawn.
- if self._in_ds or only_last_uppx:
- dsg = self.ds_graphics or self.graphics
-
- # XXX: pretty sure we don't need this?
- # if isinstance(g, Curve):
- # with dsg.reset_cache():
- uppx = self._last_uppx
- y = y[-uppx:]
- ymn, ymx = y.min(), y.max()
- # print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}')
- try:
- iuppx = x[-uppx]
- except IndexError:
- # we're less then an x-px wide so just grab the start
- # datum index.
- iuppx = x[0]
-
- dsg._last_line = QLineF(
- iuppx, ymn,
- x[-1], ymx,
- )
- # print(f'updating DS curve {self.name}')
- dsg.update()
-
- else:
- # print(f'updating NOT DS curve {self.name}')
- g.update()
-
-
-class Renderer(msgspec.Struct):
-
- flow: Flow
- fmtr: IncrementalFormatter
-
- # output graphics rendering, the main object
- # processed in ``QGraphicsObject.paint()``
- path: Optional[QPainterPath] = None
- fast_path: Optional[QPainterPath] = None
-
- # XXX: just ideas..
- # called on the final data (transform) output to convert
- # to "graphical data form" a format that can be passed to
- # the ``.draw()`` implementation.
- # graphics_t: Optional[Callable[ShmArray, np.ndarray]] = None
- # graphics_t_shm: Optional[ShmArray] = None
-
- # path graphics update implementation methods
- # prepend_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
- # append_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
-
- # downsampling state
- _last_uppx: float = 0
- _in_ds: bool = False
-
- def draw_path(
- self,
- x: np.ndarray,
- y: np.ndarray,
- connect: str | np.ndarray = 'all',
- path: Optional[QPainterPath] = None,
- redraw: bool = False,
-
- ) -> QPainterPath:
-
- path_was_none = path is None
-
- if redraw and path:
- path.clear()
-
- # TODO: avoid this?
- if self.fast_path:
- self.fast_path.clear()
-
- # profiler('cleared paths due to `should_redraw=True`')
-
- path = pg.functions.arrayToQPath(
- x,
- y,
- connect=connect,
- finiteCheck=False,
-
- # reserve mem allocs see:
- # - https://doc.qt.io/qt-5/qpainterpath.html#reserve
- # - https://doc.qt.io/qt-5/qpainterpath.html#capacity
- # - https://doc.qt.io/qt-5/qpainterpath.html#clear
- # XXX: right now this is based on had hoc checks on a
- # hidpi 3840x2160 4k monitor but we should optimize for
- # the target display(s) on the sys.
- # if no_path_yet:
- # graphics.path.reserve(int(500e3))
- # path=path, # path re-use / reserving
- )
-
- # avoid mem allocs if possible
- if path_was_none:
- path.reserve(path.capacity())
-
- return path
-
- def render(
- self,
-
- new_read,
- array_key: str,
- profiler: Profiler,
- uppx: float = 1,
-
- # redraw and ds flags
- should_redraw: bool = False,
- new_sample_rate: bool = False,
- should_ds: bool = False,
- showing_src_data: bool = True,
-
- do_append: bool = True,
- slice_to_head: int = -1,
- use_fpath: bool = True,
-
- # only render datums "in view" of the ``ChartView``
- use_vr: bool = True,
- read_from_key: bool = True,
-
- ) -> list[QPainterPath]:
- '''
- Render the current graphics path(s)
-
- There are (at least) 3 stages from source data to graphics data:
- - a data transform (which can be stored in additional shm)
- - a graphics transform which converts discrete basis data to
- a `float`-basis view-coords graphics basis. (eg. ``ohlc_flatten()``,
- ``step_path_arrays_from_1d()``, etc.)
-
- - blah blah blah (from notes)
-
- '''
- # TODO: can the renderer just call ``Flow.read()`` directly?
- # unpack latest source data read
- fmtr = self.fmtr
-
- (
- _,
- _,
- array,
- ivl,
- ivr,
- in_view,
- ) = new_read
-
- # xy-path data transform: convert source data to a format
- # able to be passed to a `QPainterPath` rendering routine.
- fmt_out = fmtr.format_to_1d(
- new_read,
- array_key,
- profiler,
-
- slice_to_head=slice_to_head,
- read_src_from_key=read_from_key,
- slice_to_inview=use_vr,
- )
-
- # no history in view case
- if not fmt_out:
- # XXX: this might be why the profiler only has exits?
- return
-
- (
- x_1d,
- y_1d,
- connect,
- prepend_length,
- append_length,
- view_changed,
- # append_tres,
-
- ) = fmt_out
-
- # redraw conditions
- if (
- prepend_length > 0
- or new_sample_rate
- or view_changed
-
- # NOTE: comment this to try and make "append paths"
- # work below..
- or append_length > 0
- ):
- should_redraw = True
-
- path = self.path
- fast_path = self.fast_path
- reset = False
-
- # redraw the entire source data if we have either of:
- # - no prior path graphic rendered or,
- # - we always intend to re-render the data only in view
- if (
- path is None
- or should_redraw
- ):
- # print(f"{self.flow.name} -> REDRAWING BRUH")
- if new_sample_rate and showing_src_data:
- log.info(f'DEDOWN -> {array_key}')
- self._in_ds = False
-
- elif should_ds and uppx > 1:
-
- x_1d, y_1d, ymn, ymx = xy_downsample(
- x_1d,
- y_1d,
- uppx,
- )
- self.flow.yrange = ymn, ymx
- # print(f'{self.flow.name} post ds: ymn, ymx: {ymn},{ymx}')
-
- reset = True
- profiler(f'FULL PATH downsample redraw={should_ds}')
- self._in_ds = True
-
- path = self.draw_path(
- x=x_1d,
- y=y_1d,
- connect=connect,
- path=path,
- redraw=True,
- )
-
- profiler(
- 'generated fresh path. '
- f'(should_redraw: {should_redraw} '
- f'should_ds: {should_ds} new_sample_rate: {new_sample_rate})'
- )
-
- # TODO: get this piecewise prepend working - right now it's
- # giving heck on vwap...
- # elif prepend_length:
-
- # prepend_path = pg.functions.arrayToQPath(
- # x[0:prepend_length],
- # y[0:prepend_length],
- # connect='all'
- # )
-
- # # swap prepend path in "front"
- # old_path = graphics.path
- # graphics.path = prepend_path
- # # graphics.path.moveTo(new_x[0], new_y[0])
- # graphics.path.connectPath(old_path)
-
- elif (
- append_length > 0
- and do_append
- ):
- print(f'{array_key} append len: {append_length}')
- # new_x = x_1d[-append_length - 2:] # slice_to_head]
- # new_y = y_1d[-append_length - 2:] # slice_to_head]
- profiler('sliced append path')
- # (
- # x_1d,
- # y_1d,
- # connect,
- # ) = append_tres
-
- profiler(
- f'diffed array input, append_length={append_length}'
- )
-
- # if should_ds and uppx > 1:
- # new_x, new_y = xy_downsample(
- # new_x,
- # new_y,
- # uppx,
- # )
- # profiler(f'fast path downsample redraw={should_ds}')
-
- append_path = self.draw_path(
- x=x_1d,
- y=y_1d,
- connect=connect,
- path=fast_path,
- )
- profiler('generated append qpath')
-
- if use_fpath:
- # print(f'{self.flow.name}: FAST PATH')
- # an attempt at trying to make append-updates faster..
- if fast_path is None:
- fast_path = append_path
- # fast_path.reserve(int(6e3))
- else:
- fast_path.connectPath(append_path)
- size = fast_path.capacity()
- profiler(f'connected fast path w size: {size}')
-
- print(
- f"append_path br: {append_path.boundingRect()}\n"
- f"path size: {size}\n"
- f"append_path len: {append_path.length()}\n"
- f"fast_path len: {fast_path.length()}\n"
- )
- # graphics.path.moveTo(new_x[0], new_y[0])
- # path.connectPath(append_path)
-
- # XXX: lol this causes a hang..
- # graphics.path = graphics.path.simplified()
- else:
- size = path.capacity()
- profiler(f'connected history path w size: {size}')
- path.connectPath(append_path)
-
- self.path = path
- self.fast_path = fast_path
-
- return self.path, array, reset
diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py
index 29162635..5dee7b88 100644
--- a/piker/ui/_fsp.py
+++ b/piker/ui/_fsp.py
@@ -79,14 +79,14 @@ def has_vlm(ohlcv: ShmArray) -> bool:
def update_fsp_chart(
chart: ChartPlotWidget,
- flow,
+ viz,
graphics_name: str,
array_key: Optional[str],
**kwargs,
) -> None:
- shm = flow.shm
+ shm = viz.shm
if not shm:
return
@@ -289,7 +289,7 @@ async def run_fsp_ui(
# first UI update, usually from shm pushed history
update_fsp_chart(
chart,
- chart._flows[array_key],
+ chart.get_viz(array_key),
name,
array_key=array_key,
)
@@ -357,7 +357,7 @@ async def run_fsp_ui(
# last = time.time()
-# TODO: maybe this should be our ``Flow`` type since it maps
+# TODO: maybe this should be our ``Viz`` type since it maps
# one flume to the next? The machinery for task/actor mgmt should
# be part of the instantiation API?
class FspAdmin:
@@ -386,7 +386,7 @@ class FspAdmin:
# TODO: make this a `.src_flume` and add
# a `dst_flume`?
- # (=> but then wouldn't this be the most basic `Flow`?)
+ # (=> but then wouldn't this be the most basic `Viz`?)
self.flume = flume
def rr_next_portal(self) -> tractor.Portal:
@@ -666,7 +666,7 @@ async def open_vlm_displays(
shm = ohlcv
ohlc_chart = linked.chart
- chart = linked.add_plot(
+ vlm_chart = linked.add_plot(
name='volume',
shm=shm,
flume=flume,
@@ -682,10 +682,12 @@ async def open_vlm_displays(
# the curve item internals are pretty convoluted.
style='step',
)
+ vlm_chart.view.enable_auto_yrange()
+
# back-link the volume chart to trigger y-autoranging
# in the ohlc (parent) chart.
ohlc_chart.view.enable_auto_yrange(
- src_vb=chart.view,
+ src_vb=vlm_chart.view,
)
# force 0 to always be in view
@@ -694,7 +696,7 @@ async def open_vlm_displays(
) -> tuple[float, float]:
'''
- Flows "group" maxmin loop; assumes all named flows
+ Viz "group" maxmin loop; assumes all named flows
are in the same co-domain and thus can be sorted
as one set.
@@ -707,7 +709,7 @@ async def open_vlm_displays(
'''
mx = 0
for name in names:
- ymn, ymx = chart.maxmin(name=name)
+ ymn, ymx = vlm_chart.maxmin(name=name)
mx = max(mx, ymx)
return 0, mx
@@ -715,34 +717,33 @@ async def open_vlm_displays(
# TODO: fix the x-axis label issue where if you put
# the axis on the left it's totally not lined up...
# show volume units value on LHS (for dinkus)
- # chart.hideAxis('right')
- # chart.showAxis('left')
+ # vlm_chart.hideAxis('right')
+ # vlm_chart.showAxis('left')
# send back new chart to caller
- task_status.started(chart)
+ task_status.started(vlm_chart)
# should **not** be the same sub-chart widget
- assert chart.name != linked.chart.name
+ assert vlm_chart.name != linked.chart.name
# sticky only on sub-charts atm
- last_val_sticky = chart.plotItem.getAxis(
- 'right')._stickies.get(chart.name)
+ last_val_sticky = vlm_chart.plotItem.getAxis(
+ 'right')._stickies.get(vlm_chart.name)
# read from last calculated value
value = shm.array['volume'][-1]
last_val_sticky.update_from_data(-1, value)
- vlm_curve = chart.update_graphics_from_flow(
+ vlm_curve = vlm_chart.update_graphics_from_flow(
'volume',
- # shm.array,
)
# size view to data once at outset
- chart.view._set_yrange()
+ vlm_chart.view._set_yrange()
# add axis title
- axis = chart.getAxis('right')
+ axis = vlm_chart.getAxis('right')
axis.set_title(' vlm')
if dvlm:
@@ -782,7 +783,7 @@ async def open_vlm_displays(
# XXX: the main chart already contains a vlm "units" axis
# so here we add an overlay wth a y-range in
# $ liquidity-value units (normally a fiat like USD).
- dvlm_pi = chart.overlay_plotitem(
+ dvlm_pi = vlm_chart.overlay_plotitem(
'dolla_vlm',
index=0, # place axis on inside (nearest to chart)
axis_title=' $vlm',
@@ -833,6 +834,7 @@ async def open_vlm_displays(
names: list[str],
pi: pg.PlotItem,
shm: ShmArray,
+ flume: Flume,
step_mode: bool = False,
style: str = 'solid',
@@ -849,7 +851,7 @@ async def open_vlm_displays(
assert isinstance(shm, ShmArray)
assert isinstance(flume, Flume)
- flow = chart.draw_curve(
+ viz = vlm_chart.draw_curve(
name,
shm,
flume,
@@ -860,18 +862,13 @@ async def open_vlm_displays(
style=style,
pi=pi,
)
-
- # TODO: we need a better API to do this..
- # specially store ref to shm for lookup in display loop
- # since only a placeholder of `None` is entered in
- # ``.draw_curve()``.
- # flow = chart._flows[name]
- assert flow.plot is pi
+ assert viz.plot is pi
chart_curves(
fields,
dvlm_pi,
dvlm_flume.rt_shm,
+ dvlm_flume,
step_mode=True,
)
@@ -900,17 +897,17 @@ async def open_vlm_displays(
# displayed and the curves are effectively the same minus
# liquidity events (well at least on low OHLC periods - 1s).
vlm_curve.hide()
- chart.removeItem(vlm_curve)
- vflow = chart._flows['volume']
- vflow.render = False
+ vlm_chart.removeItem(vlm_curve)
+ vlm_viz = vlm_chart._vizs['volume']
+ vlm_viz.render = False
# avoid range sorting on volume once disabled
- chart.view.disable_auto_yrange()
+ vlm_chart.view.disable_auto_yrange()
# Trade rate overlay
# XXX: requires an additional overlay for
# a trades-per-period (time) y-range.
- tr_pi = chart.overlay_plotitem(
+ tr_pi = vlm_chart.overlay_plotitem(
'trade_rates',
# TODO: dynamically update period (and thus this axis?)
@@ -940,6 +937,7 @@ async def open_vlm_displays(
trade_rate_fields,
tr_pi,
fr_flume.rt_shm,
+ fr_flume,
# step_mode=True,
# dashed line to represent "individual trades" being
diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py
index e17e662e..e9a7089f 100644
--- a/piker/ui/_interaction.py
+++ b/piker/ui/_interaction.py
@@ -76,7 +76,6 @@ async def handle_viewmode_kb_inputs(
pressed: set[str] = set()
last = time.time()
- trigger_mode: str
action: str
on_next_release: Optional[Callable] = None
@@ -495,7 +494,7 @@ class ChartView(ViewBox):
chart = self.linked.chart
# don't zoom more then the min points setting
- l, lbar, rbar, r = chart.bars_range()
+ out = l, lbar, rbar, r = chart.get_viz(chart.name).bars_range()
# vl = r - l
# if ev.delta() > 0 and vl <= _min_points_to_show:
@@ -504,7 +503,7 @@ class ChartView(ViewBox):
# if (
# ev.delta() < 0
- # and vl >= len(chart._flows[chart.name].shm.array) + 666
+ # and vl >= len(chart._vizs[chart.name].shm.array) + 666
# ):
# log.debug("Min zoom bruh...")
# return
@@ -821,7 +820,7 @@ class ChartView(ViewBox):
# XXX: only compute the mxmn range
# if none is provided as input!
if not yrange:
- # flow = chart._flows[name]
+ # flow = chart._vizs[name]
yrange = self._maxmin()
if yrange is None:
@@ -912,7 +911,7 @@ class ChartView(ViewBox):
graphics items which are our children.
'''
- graphics = [f.graphics for f in self._chart._flows.values()]
+ graphics = [f.graphics for f in self._chart._vizs.values()]
if not graphics:
return 0
@@ -948,7 +947,7 @@ class ChartView(ViewBox):
plots |= linked.subplots
for chart_name, chart in plots.items():
- for name, flow in chart._flows.items():
+ for name, flow in chart._vizs.items():
if (
not flow.render
diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py
index 2ce23d30..de421cd2 100644
--- a/piker/ui/_ohlc.py
+++ b/piker/ui/_ohlc.py
@@ -36,6 +36,7 @@ from PyQt5.QtCore import (
from PyQt5.QtGui import QPainterPath
+from ._curve import FlowGraphic
from .._profile import pg_profile_enabled, ms_slower_then
from ._style import hcolor
from ..log import get_logger
@@ -51,7 +52,8 @@ log = get_logger(__name__)
def bar_from_ohlc_row(
row: np.ndarray,
# 0.5 is no overlap between arms, 1.0 is full overlap
- w: float = 0.43
+ bar_w: float,
+ bar_gap: float = 0.16
) -> tuple[QLineF]:
'''
@@ -59,8 +61,7 @@ def bar_from_ohlc_row(
OHLC "bar" for use in the "last datum" of a series.
'''
- open, high, low, close, index = row[
- ['open', 'high', 'low', 'close', 'index']]
+ open, high, low, close, index = row
# TODO: maybe consider using `QGraphicsLineItem` ??
# gives us a ``.boundingRect()`` on the objects which may make
@@ -68,9 +69,11 @@ def bar_from_ohlc_row(
# history path faster since it's done in C++:
# https://doc.qt.io/qt-5/qgraphicslineitem.html
+ mid: float = (bar_w / 2) + index
+
# high -> low vertical (body) line
if low != high:
- hl = QLineF(index, low, index, high)
+ hl = QLineF(mid, low, mid, high)
else:
# XXX: if we don't do it renders a weird rectangle?
# see below for filtering this later...
@@ -81,15 +84,18 @@ def bar_from_ohlc_row(
# the index's range according to the view mapping coordinates.
# open line
- o = QLineF(index - w, open, index, open)
+ o = QLineF(index + bar_gap, open, mid, open)
# close line
- c = QLineF(index, close, index + w, close)
+ c = QLineF(
+ mid, close,
+ index + bar_w - bar_gap, close,
+ )
return [hl, o, c]
-class BarItems(pg.GraphicsObject):
+class BarItems(FlowGraphic):
'''
"Price range" bars graphics rendered from a OHLC sampled sequence.
@@ -113,13 +119,24 @@ class BarItems(pg.GraphicsObject):
self.last_bar_pen = pg.mkPen(hcolor(last_bar_color), width=2)
self._name = name
- self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
- self.path = QPainterPath()
- self._last_bar_lines: Optional[tuple[QLineF, ...]] = None
+ # XXX: causes this weird jitter bug when click-drag panning
+ # where the path curve will awkwardly flicker back and forth?
+ # self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
- def x_uppx(self) -> int:
- # we expect the downsample curve report this.
- return 0
+ self.path = QPainterPath()
+ self._last_bar_lines: tuple[QLineF, ...] | None = None
+
+ def x_last(self) -> None | float:
+ '''
+ Return the last-most x value of the close line segment
+ or if not drawn yet, ``None``.
+
+ '''
+ if self._last_bar_lines:
+ close_arm_line = self._last_bar_lines[-1]
+ return close_arm_line.x2() if close_arm_line else None
+ else:
+ return None
# Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect
def boundingRect(self):
@@ -214,33 +231,40 @@ class BarItems(pg.GraphicsObject):
self,
path: QPainterPath,
src_data: np.ndarray,
- render_data: np.ndarray,
reset: bool,
array_key: str,
-
- fields: list[str] = [
- 'index',
- 'open',
- 'high',
- 'low',
- 'close',
- ],
+ index_field: str,
) -> None:
# relevant fields
+ fields: list[str] = [
+ 'open',
+ 'high',
+ 'low',
+ 'close',
+ index_field,
+ ]
ohlc = src_data[fields]
# last_row = ohlc[-1:]
# individual values
- last_row = i, o, h, l, last = ohlc[-1]
+ last_row = o, h, l, last, i = ohlc[-1]
# times = src_data['time']
# if times[-1] - times[-2]:
# breakpoint()
+ index = src_data[index_field]
+ step_size = index[-1] - index[-2]
+
# generate new lines objects for updatable "current bar"
- self._last_bar_lines = bar_from_ohlc_row(last_row)
+ bg: float = 0.16 * step_size
+ self._last_bar_lines = bar_from_ohlc_row(
+ last_row,
+ bar_w=step_size,
+ bar_gap=bg,
+ )
# assert i == graphics.start_index - 1
# assert i == last_index
@@ -255,10 +279,16 @@ class BarItems(pg.GraphicsObject):
if l != h: # noqa
if body is None:
- body = self._last_bar_lines[0] = QLineF(i, l, i, h)
+ body = self._last_bar_lines[0] = QLineF(
+ i + bg, l,
+ i + step_size - bg, h,
+ )
else:
# update body
- body.setLine(i, l, i, h)
+ body.setLine(
+ body.x1(), l,
+ body.x2(), h,
+ )
# XXX: pretty sure this is causing an issue where the
# bar has a large upward move right before the next
@@ -270,4 +300,4 @@ class BarItems(pg.GraphicsObject):
# because i've seen it do this to bars i - 3 back?
# return ohlc['time'], ohlc['close']
- return ohlc['index'], ohlc['close']
+ return ohlc[index_field], ohlc['close']
diff --git a/piker/ui/_pg_overrides.py b/piker/ui/_pg_overrides.py
index 397954fd..b7c0b9aa 100644
--- a/piker/ui/_pg_overrides.py
+++ b/piker/ui/_pg_overrides.py
@@ -54,6 +54,10 @@ def _do_overrides() -> None:
pg.functions.invertQTransform = invertQTransform
pg.PlotItem = PlotItem
+ # enable "QPainterPathPrivate for faster arrayToQPath" from
+ # https://github.com/pyqtgraph/pyqtgraph/pull/2324
+ pg.setConfigOption('enableExperimental', True)
+
# NOTE: the below customized type contains all our changes on a method
# by method basis as per the diff:
diff --git a/piker/ui/_render.py b/piker/ui/_render.py
new file mode 100644
index 00000000..dff46dab
--- /dev/null
+++ b/piker/ui/_render.py
@@ -0,0 +1,332 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+High level streaming graphics primitives.
+
+This is an intermediate layer which associates real-time low latency
+graphics primitives with underlying stream/flow related data structures
+for fast incremental update.
+
+'''
+from __future__ import annotations
+from typing import (
+ Optional,
+ TYPE_CHECKING,
+)
+
+import msgspec
+import numpy as np
+import pyqtgraph as pg
+from PyQt5.QtGui import QPainterPath
+
+from ..data._formatters import (
+ IncrementalFormatter,
+)
+from ..data._pathops import (
+ xy_downsample,
+)
+from ..log import get_logger
+from .._profile import (
+ Profiler,
+)
+
+if TYPE_CHECKING:
+ from ._dataviz import Viz
+
+
+log = get_logger(__name__)
+
+
+class Renderer(msgspec.Struct):
+
+ viz: Viz
+ fmtr: IncrementalFormatter
+
+ # output graphics rendering, the main object
+ # processed in ``QGraphicsObject.paint()``
+ path: Optional[QPainterPath] = None
+ fast_path: Optional[QPainterPath] = None
+
+ # XXX: just ideas..
+ # called on the final data (transform) output to convert
+ # to "graphical data form" a format that can be passed to
+ # the ``.draw()`` implementation.
+ # graphics_t: Optional[Callable[ShmArray, np.ndarray]] = None
+ # graphics_t_shm: Optional[ShmArray] = None
+
+ # path graphics update implementation methods
+ # prepend_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
+ # append_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
+
+ # downsampling state
+ _last_uppx: float = 0
+ _in_ds: bool = False
+
+ def draw_path(
+ self,
+ x: np.ndarray,
+ y: np.ndarray,
+ connect: str | np.ndarray = 'all',
+ path: Optional[QPainterPath] = None,
+ redraw: bool = False,
+
+ ) -> QPainterPath:
+
+ path_was_none = path is None
+
+ if redraw and path:
+ path.clear()
+
+ # TODO: avoid this?
+ if self.fast_path:
+ self.fast_path.clear()
+
+ path = pg.functions.arrayToQPath(
+ x,
+ y,
+ connect=connect,
+ finiteCheck=False,
+
+ # reserve mem allocs see:
+ # - https://doc.qt.io/qt-5/qpainterpath.html#reserve
+ # - https://doc.qt.io/qt-5/qpainterpath.html#capacity
+ # - https://doc.qt.io/qt-5/qpainterpath.html#clear
+ # XXX: right now this is based on ad hoc checks on a
+ # hidpi 3840x2160 4k monitor but we should optimize for
+ # the target display(s) on the sys.
+ # if no_path_yet:
+ # graphics.path.reserve(int(500e3))
+ # path=path, # path re-use / reserving
+ )
+
+ # avoid mem allocs if possible
+ if path_was_none:
+ path.reserve(path.capacity())
+
+ return path
+
+ def render(
+ self,
+
+ new_read,
+ array_key: str,
+ profiler: Profiler,
+ uppx: float = 1,
+
+ # redraw and ds flags
+ should_redraw: bool = False,
+ new_sample_rate: bool = False,
+ should_ds: bool = False,
+ showing_src_data: bool = True,
+
+ do_append: bool = True,
+ use_fpath: bool = True,
+
+ # only render datums "in view" of the ``ChartView``
+ use_vr: bool = True,
+
+ ) -> tuple[QPainterPath, bool]:
+ '''
+ Render the current graphics path(s)
+
+ There are (at least) 3 stages from source data to graphics data:
+ - a data transform (which can be stored in additional shm)
+ - a graphics transform which converts discrete basis data to
+ a `float`-basis view-coords graphics basis. (eg. ``ohlc_flatten()``,
+ ``step_path_arrays_from_1d()``, etc.)
+
+ - blah blah blah (from notes)
+
+ '''
+ # TODO: can the renderer just call ``Viz.read()`` directly?
+ # unpack latest source data read
+ fmtr = self.fmtr
+
+ (
+ _,
+ _,
+ array,
+ ivl,
+ ivr,
+ in_view,
+ ) = new_read
+
+ # xy-path data transform: convert source data to a format
+ # able to be passed to a `QPainterPath` rendering routine.
+ fmt_out = fmtr.format_to_1d(
+ new_read,
+ array_key,
+ profiler,
+
+ slice_to_inview=use_vr,
+ )
+
+ # no history in view case
+ if not fmt_out:
+ # XXX: this might be why the profiler only has exits?
+ return
+
+ (
+ x_1d,
+ y_1d,
+ connect,
+ prepend_length,
+ append_length,
+ view_changed,
+ # append_tres,
+
+ ) = fmt_out
+
+ # redraw conditions
+ if (
+ prepend_length > 0
+ or new_sample_rate
+ or view_changed
+
+ # NOTE: comment this to try and make "append paths"
+ # work below..
+ or append_length > 0
+ ):
+ should_redraw = True
+
+ path: QPainterPath = self.path
+ fast_path: QPainterPath = self.fast_path
+ reset: bool = False
+
+ self.viz.yrange = None
+
+ # redraw the entire source data if we have either of:
+ # - no prior path graphic rendered or,
+ # - we always intend to re-render the data only in view
+ if (
+ path is None
+ or should_redraw
+ ):
+ # print(f"{self.viz.name} -> REDRAWING BRUH")
+ if new_sample_rate and showing_src_data:
+ log.info(f'DEDOWN -> {array_key}')
+ self._in_ds = False
+
+ elif should_ds and uppx > 1:
+
+ x_1d, y_1d, ymn, ymx = xy_downsample(
+ x_1d,
+ y_1d,
+ uppx,
+ )
+ self.viz.yrange = ymn, ymx
+ # print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}')
+
+ reset = True
+ profiler(f'FULL PATH downsample redraw={should_ds}')
+ self._in_ds = True
+
+ path = self.draw_path(
+ x=x_1d,
+ y=y_1d,
+ connect=connect,
+ path=path,
+ redraw=True,
+ )
+
+ profiler(
+ 'generated fresh path. '
+ f'(should_redraw: {should_redraw} '
+ f'should_ds: {should_ds} new_sample_rate: {new_sample_rate})'
+ )
+
+ # TODO: get this piecewise prepend working - right now it's
+ # giving heck on vwap...
+ # elif prepend_length:
+
+ # prepend_path = pg.functions.arrayToQPath(
+ # x[0:prepend_length],
+ # y[0:prepend_length],
+ # connect='all'
+ # )
+
+ # # swap prepend path in "front"
+ # old_path = graphics.path
+ # graphics.path = prepend_path
+ # # graphics.path.moveTo(new_x[0], new_y[0])
+ # graphics.path.connectPath(old_path)
+
+ elif (
+ append_length > 0
+ and do_append
+ ):
+ print(f'{array_key} append len: {append_length}')
+ # new_x = x_1d[-append_length - 2:] # slice_to_head]
+ # new_y = y_1d[-append_length - 2:] # slice_to_head]
+ profiler('sliced append path')
+ # (
+ # x_1d,
+ # y_1d,
+ # connect,
+ # ) = append_tres
+
+ profiler(
+ f'diffed array input, append_length={append_length}'
+ )
+
+ # if should_ds and uppx > 1:
+ # new_x, new_y = xy_downsample(
+ # new_x,
+ # new_y,
+ # uppx,
+ # )
+ # profiler(f'fast path downsample redraw={should_ds}')
+
+ append_path = self.draw_path(
+ x=x_1d,
+ y=y_1d,
+ connect=connect,
+ path=fast_path,
+ )
+ profiler('generated append qpath')
+
+ if use_fpath:
+ # print(f'{self.viz.name}: FAST PATH')
+ # an attempt at trying to make append-updates faster..
+ if fast_path is None:
+ fast_path = append_path
+ # fast_path.reserve(int(6e3))
+ else:
+ fast_path.connectPath(append_path)
+ size = fast_path.capacity()
+ profiler(f'connected fast path w size: {size}')
+
+ print(
+ f"append_path br: {append_path.boundingRect()}\n"
+ f"path size: {size}\n"
+ f"append_path len: {append_path.length()}\n"
+ f"fast_path len: {fast_path.length()}\n"
+ )
+ # graphics.path.moveTo(new_x[0], new_y[0])
+ # path.connectPath(append_path)
+
+ # XXX: lol this causes a hang..
+ # graphics.path = graphics.path.simplified()
+ else:
+ size = path.capacity()
+ profiler(f'connected history path w size: {size}')
+ path.connectPath(append_path)
+
+ self.path = path
+ self.fast_path = fast_path
+
+ return self.path, reset
diff --git a/piker/ui/order_mode.py b/piker/ui/order_mode.py
index 1dd49872..4ac2f457 100644
--- a/piker/ui/order_mode.py
+++ b/piker/ui/order_mode.py
@@ -494,7 +494,7 @@ class OrderMode:
uuid: str,
price: float,
- arrow_index: float,
+ time_s: float,
pointing: Optional[str] = None,
@@ -513,22 +513,32 @@ class OrderMode:
'''
dialog = self.dialogs[uuid]
lines = dialog.lines
+ chart = self.chart
+
# XXX: seems to fail on certain types of races?
# assert len(lines) == 2
if lines:
- flume: Flume = self.feed.flumes[self.chart.linked.symbol.fqsn]
+ flume: Flume = self.feed.flumes[chart.linked.symbol.fqsn]
_, _, ratio = flume.get_ds_info()
- for i, chart in [
- (arrow_index, self.chart),
- (flume.izero_hist
- +
- round((arrow_index - flume.izero_rt)/ratio),
- self.hist_chart)
+
+ for chart, shm in [
+ (self.chart, flume.rt_shm),
+ (self.hist_chart, flume.hist_shm),
]:
+ viz = chart.get_viz(chart.name)
+ index_field = viz.index_field
+ arr = shm.array
+
+ # TODO: borked for int index based..
+ index = flume.get_index(time_s, arr)
+
+ # get absolute index for arrow placement
+ arrow_index = arr[index_field][index]
+
self.arrows.add(
chart.plotItem,
uuid,
- i,
+ arrow_index,
price,
pointing=pointing,
color=lines[0].color
@@ -966,7 +976,6 @@ async def process_trade_msg(
if dialog:
fqsn = dialog.symbol
- flume = mode.feed.flumes[fqsn]
match msg:
case Status(
@@ -1037,11 +1046,11 @@ async def process_trade_msg(
# should only be one "fill" for an alert
# add a triangle and remove the level line
req = Order(**req)
- index = flume.get_index(time.time())
+ tm = time.time()
mode.on_fill(
oid,
price=req.price,
- arrow_index=index,
+ time_s=tm,
)
mode.lines.remove_line(uuid=oid)
msg.req = req
@@ -1070,6 +1079,8 @@ async def process_trade_msg(
details = msg.brokerd_msg
# TODO: put the actual exchange timestamp?
+ # TODO: some kinda progress system?
+
# NOTE: currently the ``kraken`` openOrders sub
# doesn't deliver their engine timestamp as part of
# it's schema, so this value is **not** from them
@@ -1080,15 +1091,11 @@ async def process_trade_msg(
# a true backend one? This will require finagling
# with how each backend tracks/summarizes time
# stamps for the downstream API.
- index = flume.get_index(
- details['broker_time']
- )
-
- # TODO: some kinda progress system
+ tm = details['broker_time']
mode.on_fill(
oid,
price=details['price'],
- arrow_index=index,
+ time_s=tm,
pointing='up' if action == 'buy' else 'down',
)
diff --git a/piker/ui/qt/__init__.py b/piker/ui/qt/__init__.py
deleted file mode 100644
index 8513b317..00000000
--- a/piker/ui/qt/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-Super hawt Qt UI components
-"""
diff --git a/piker/ui/qt/stackof_candle.py b/piker/ui/qt/stackof_candle.py
deleted file mode 100644
index 0bcd37e4..00000000
--- a/piker/ui/qt/stackof_candle.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import sys
-
-from PySide2.QtCharts import QtCharts
-from PySide2.QtWidgets import QApplication, QMainWindow
-from PySide2.QtCore import Qt, QPointF
-from PySide2 import QtGui
-import qdarkstyle
-
-data = ((1, 7380, 7520, 7380, 7510, 7324),
- (2, 7520, 7580, 7410, 7440, 7372),
- (3, 7440, 7650, 7310, 7520, 7434),
- (4, 7450, 7640, 7450, 7550, 7480),
- (5, 7510, 7590, 7460, 7490, 7502),
- (6, 7500, 7590, 7480, 7560, 7512),
- (7, 7560, 7830, 7540, 7800, 7584))
-
-
-app = QApplication([])
-# set dark stylesheet
-# import pdb; pdb.set_trace()
-app.setStyleSheet(qdarkstyle.load_stylesheet_pyside())
-
-series = QtCharts.QCandlestickSeries()
-series.setDecreasingColor(Qt.darkRed)
-series.setIncreasingColor(Qt.darkGreen)
-
-ma5 = QtCharts.QLineSeries() # 5-days average data line
-tm = [] # stores str type data
-
-# in a loop, series and ma5 append corresponding data
-for num, o, h, l, c, m in data:
- candle = QtCharts.QCandlestickSet(o, h, l, c)
- series.append(candle)
- ma5.append(QPointF(num, m))
- tm.append(str(num))
-
-pen = candle.pen()
-# import pdb; pdb.set_trace()
-
-chart = QtCharts.QChart()
-
-# import pdb; pdb.set_trace()
-series.setBodyOutlineVisible(False)
-series.setCapsVisible(False)
-# brush = QtGui.QBrush()
-# brush.setColor(Qt.green)
-# series.setBrush(brush)
-chart.addSeries(series) # candle
-chart.addSeries(ma5) # ma5 line
-
-chart.setAnimationOptions(QtCharts.QChart.SeriesAnimations)
-chart.createDefaultAxes()
-chart.legend().hide()
-
-chart.axisX(series).setCategories(tm)
-chart.axisX(ma5).setVisible(False)
-
-view = QtCharts.QChartView(chart)
-view.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeDark)
-view.setRubberBand(QtCharts.QChartView.HorizontalRubberBand)
-# chartview.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeBlueCerulean)
-
-ui = QMainWindow()
-# ui.setGeometry(50, 50, 500, 300)
-ui.setCentralWidget(view)
-ui.show()
-sys.exit(app.exec_())