From cda05b1a910ff335450c6a1a6b2bc8e39443c31d Mon Sep 17 00:00:00 2001 From: Dan Ponte Date: Wed, 1 Feb 2023 21:37:42 +0000 Subject: [PATCH] Vendor picamera --- picamera/__init__.py | 114 ++ picamera/array.py | 908 +++++++++ picamera/bcm_host.py | 991 ++++++++++ picamera/camera.py | 4165 +++++++++++++++++++++++++++++++++++++++++ picamera/color.py | 50 + picamera/display.py | 320 ++++ picamera/encoders.py | 1218 ++++++++++++ picamera/exc.py | 185 ++ picamera/frames.py | 214 +++ picamera/mmal.py | 2481 ++++++++++++++++++++++++ picamera/mmalobj.py | 3736 ++++++++++++++++++++++++++++++++++++ picamera/renderers.py | 605 ++++++ picamera/streams.py | 833 +++++++++ 13 files changed, 15820 insertions(+) create mode 100644 picamera/__init__.py create mode 100644 picamera/array.py create mode 100644 picamera/bcm_host.py create mode 100644 picamera/camera.py create mode 100644 picamera/color.py create mode 100644 picamera/display.py create mode 100644 picamera/encoders.py create mode 100644 picamera/exc.py create mode 100644 picamera/frames.py create mode 100644 picamera/mmal.py create mode 100644 picamera/mmalobj.py create mode 100644 picamera/renderers.py create mode 100644 picamera/streams.py diff --git a/picamera/__init__.py b/picamera/__init__.py new file mode 100644 index 0000000..0f98b1a --- /dev/null +++ b/picamera/__init__.py @@ -0,0 +1,114 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +The picamera package consists of several modules which provide a pure Python +interface to the Raspberry Pi's camera module. The package is only intended to +run on a Raspberry Pi, and expects to be able to load the MMAL library +(libmmal.so) upon import. + +The classes defined by most modules in this package are available directly from +the :mod:`picamera` namespace. 
In other words, the following code is typically +all that is required to access classes in the package:: + + import picamera + +The :mod:`picamera.array` module is an exception to this as it depends on the +third-party `numpy`_ package (this avoids making numpy a mandatory dependency +for picamera). + +.. _numpy: http://www.numpy.org/ + + +The following sections document the various modules available within the +package: + +* :mod:`picamera.camera` +* :mod:`picamera.encoders` +* :mod:`picamera.frames` +* :mod:`picamera.streams` +* :mod:`picamera.renderers` +* :mod:`picamera.color` +* :mod:`picamera.exc` +* :mod:`picamera.array` +""" + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + +from picamera.exc import ( + PiCameraWarning, + PiCameraDeprecated, + PiCameraFallback, + PiCameraAlphaStripping, + PiCameraResizerEncoding, + PiCameraError, + PiCameraRuntimeError, + PiCameraClosed, + PiCameraNotRecording, + PiCameraAlreadyRecording, + PiCameraValueError, + PiCameraMMALError, + PiCameraPortDisabled, + mmal_check, + ) +from picamera.mmalobj import PiResolution, PiFramerateRange, PiSensorMode +from picamera.camera import PiCamera +from picamera.display import PiDisplay +from picamera.frames import PiVideoFrame, PiVideoFrameType +from picamera.encoders import ( + PiEncoder, + PiVideoEncoder, + PiImageEncoder, + PiRawMixin, + PiCookedVideoEncoder, + PiRawVideoEncoder, + PiOneImageEncoder, + PiMultiImageEncoder, + PiRawImageMixin, + PiCookedOneImageEncoder, + PiRawOneImageEncoder, + PiCookedMultiImageEncoder, + PiRawMultiImageEncoder, + ) +from picamera.renderers import ( + PiRenderer, + PiOverlayRenderer, + PiPreviewRenderer, + PiNullSink, + ) +from picamera.streams import PiCameraCircularIO, CircularIO, BufferIO +from picamera.color import Color, Red, Green, Blue, Hue, Lightness, Saturation diff --git a/picamera/array.py b/picamera/array.py new file mode 100644 index 0000000..3d2b783 --- /dev/null +++ b/picamera/array.py @@ -0,0 +1,908 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str and range equivalent to Py3's +native_str = str +str = type('') +try: + range = xrange +except NameError: + pass + +import io +import ctypes as ct +import warnings + +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from . import mmalobj as mo, mmal +from .exc import ( + mmal_check, + PiCameraValueError, + PiCameraDeprecated, + PiCameraPortDisabled, + ) + + +motion_dtype = np.dtype([ + (native_str('x'), np.int8), + (native_str('y'), np.int8), + (native_str('sad'), np.uint16), + ]) + + +def raw_resolution(resolution, splitter=False): + """ + Round a (width, height) tuple up to the nearest multiple of 32 horizontally + and 16 vertically (as this is what the Pi's camera module does for + unencoded output). + """ + width, height = resolution + if splitter: + fwidth = (width + 15) & ~15 + else: + fwidth = (width + 31) & ~31 + fheight = (height + 15) & ~15 + return fwidth, fheight + + +def bytes_to_yuv(data, resolution): + """ + Converts a bytes object containing YUV data to a `numpy`_ array. + """ + width, height = resolution + fwidth, fheight = raw_resolution(resolution) + y_len = fwidth * fheight + uv_len = (fwidth // 2) * (fheight // 2) + if len(data) != (y_len + 2 * uv_len): + raise PiCameraValueError( + 'Incorrect buffer length for resolution %dx%d' % (width, height)) + # Separate out the Y, U, and V values from the array + a = np.frombuffer(data, dtype=np.uint8) + Y = a[:y_len].reshape((fheight, fwidth)) + Uq = a[y_len:-uv_len].reshape((fheight // 2, fwidth // 2)) + Vq = a[-uv_len:].reshape((fheight // 2, fwidth // 2)) + # Reshape the values into two dimensions, and double the size of the + # U and V values (which only have quarter resolution in YUV4:2:0) + U = np.empty_like(Y) + V = np.empty_like(Y) + U[0::2, 0::2] = Uq + U[0::2, 1::2] = Uq + U[1::2, 0::2] = Uq + U[1::2, 1::2] = Uq + V[0::2, 0::2] = Vq + V[0::2, 1::2] = Vq + V[1::2, 0::2] = Vq + V[1::2, 1::2] = Vq + # Stack the channels together and crop to the actual resolution + return np.dstack((Y, U, V))[:height, :width] + + +def bytes_to_rgb(data, resolution): + """ + Converts a bytes objects containing RGB/BGR data to a `numpy`_ array. + """ + width, height = resolution + fwidth, fheight = raw_resolution(resolution) + # Workaround: output from the video splitter is rounded to 16x16 instead + # of 32x16 (but only for RGB, and only when a resizer is not used) + if len(data) != (fwidth * fheight * 3): + fwidth, fheight = raw_resolution(resolution, splitter=True) + if len(data) != (fwidth * fheight * 3): + raise PiCameraValueError( + 'Incorrect buffer length for resolution %dx%d' % (width, height)) + # Crop to the actual resolution + return np.frombuffer(data, dtype=np.uint8).\ + reshape((fheight, fwidth, 3))[:height, :width, :] + + +class PiArrayOutput(io.BytesIO): + """ + Base class for capture arrays. 
+ + This class extends :class:`io.BytesIO` with a `numpy`_ array which is + intended to be filled when :meth:`~io.IOBase.flush` is called (i.e. at the + end of capture). + + .. attribute:: array + + After :meth:`~io.IOBase.flush` is called, this attribute contains the + frame's data as a multi-dimensional `numpy`_ array. This is typically + organized with the dimensions ``(rows, columns, plane)``. Hence, an + RGB image with dimensions *x* and *y* would produce an array with shape + ``(y, x, 3)``. + """ + + def __init__(self, camera, size=None): + super(PiArrayOutput, self).__init__() + self.camera = camera + self.size = size + self.array = None + + def close(self): + super(PiArrayOutput, self).close() + self.array = None + + def truncate(self, size=None): + """ + Resize the stream to the given size in bytes (or the current position + if size is not specified). This resizing can extend or reduce the + current file size. The new file size is returned. + + In prior versions of picamera, truncation also changed the position of + the stream (because prior versions of these stream classes were + non-seekable). This functionality is now deprecated; scripts should + use :meth:`~io.IOBase.seek` and :meth:`truncate` as one would with + regular :class:`~io.BytesIO` instances. + """ + if size is not None: + warnings.warn( + PiCameraDeprecated( + 'This method changes the position of the stream to the ' + 'truncated length; this is deprecated functionality and ' + 'you should not rely on it (seek before or after truncate ' + 'to ensure position is consistent)')) + super(PiArrayOutput, self).truncate(size) + if size is not None: + self.seek(size) + + +class PiRGBArray(PiArrayOutput): + """ + Produces a 3-dimensional RGB array from an RGB capture. + + This custom output class can be used to easily obtain a 3-dimensional numpy + array, organized (rows, columns, colors), from an unencoded RGB capture. + The array is accessed via the :attr:`~PiArrayOutput.array` attribute. 
For + example:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + with picamera.array.PiRGBArray(camera) as output: + camera.capture(output, 'rgb') + print('Captured %dx%d image' % ( + output.array.shape[1], output.array.shape[0])) + + You can re-use the output to produce multiple arrays by emptying it with + ``truncate(0)`` between captures:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + with picamera.array.PiRGBArray(camera) as output: + camera.resolution = (1280, 720) + camera.capture(output, 'rgb') + print('Captured %dx%d image' % ( + output.array.shape[1], output.array.shape[0])) + output.truncate(0) + camera.resolution = (640, 480) + camera.capture(output, 'rgb') + print('Captured %dx%d image' % ( + output.array.shape[1], output.array.shape[0])) + + If you are using the GPU resizer when capturing (with the *resize* + parameter of the various :meth:`~PiCamera.capture` methods), specify the + resized resolution as the optional *size* parameter when constructing the + array output:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + camera.resolution = (1280, 720) + with picamera.array.PiRGBArray(camera, size=(640, 360)) as output: + camera.capture(output, 'rgb', resize=(640, 360)) + print('Captured %dx%d image' % ( + output.array.shape[1], output.array.shape[0])) + """ + + def flush(self): + super(PiRGBArray, self).flush() + self.array = bytes_to_rgb(self.getvalue(), self.size or self.camera.resolution) + + +class PiYUVArray(PiArrayOutput): + """ + Produces 3-dimensional YUV & RGB arrays from a YUV capture. + + This custom output class can be used to easily obtain a 3-dimensional numpy + array, organized (rows, columns, channel), from an unencoded YUV capture. + The array is accessed via the :attr:`~PiArrayOutput.array` attribute. For + example:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + with picamera.array.PiYUVArray(camera) as output: + camera.capture(output, 'yuv') + print('Captured %dx%d image' % ( + output.array.shape[1], output.array.shape[0])) + + The :attr:`rgb_array` attribute can be queried for the equivalent RGB + array (conversion is performed using the `ITU-R BT.601`_ matrix):: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + with picamera.array.PiYUVArray(camera) as output: + camera.resolution = (1280, 720) + camera.capture(output, 'yuv') + print(output.array.shape) + print(output.rgb_array.shape) + + If you are using the GPU resizer when capturing (with the *resize* + parameter of the various :meth:`~picamera.PiCamera.capture` methods), + specify the resized resolution as the optional *size* parameter when + constructing the array output:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + camera.resolution = (1280, 720) + with picamera.array.PiYUVArray(camera, size=(640, 360)) as output: + camera.capture(output, 'yuv', resize=(640, 360)) + print('Captured %dx%d image' % ( + output.array.shape[1], output.array.shape[0])) + + .. 
_ITU-R BT.601: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion + """ + + def __init__(self, camera, size=None): + super(PiYUVArray, self).__init__(camera, size) + self._rgb = None + + def flush(self): + super(PiYUVArray, self).flush() + self.array = bytes_to_yuv(self.getvalue(), self.size or self.camera.resolution) + self._rgb = None + + @property + def rgb_array(self): + if self._rgb is None: + # Apply the standard biases + YUV = self.array.astype(float) + YUV[:, :, 0] = YUV[:, :, 0] - 16 # Offset Y by 16 + YUV[:, :, 1:] = YUV[:, :, 1:] - 128 # Offset UV by 128 + # YUV conversion matrix from ITU-R BT.601 version (SDTV) + # Y U V + M = np.array([[1.164, 0.000, 1.596], # R + [1.164, -0.392, -0.813], # G + [1.164, 2.017, 0.000]]) # B + # Calculate the dot product with the matrix to produce RGB output, + # clamp the results to byte range and convert to bytes + self._rgb = YUV.dot(M.T).clip(0, 255).astype(np.uint8) + return self._rgb + + +class BroadcomRawHeader(ct.Structure): + _fields_ = [ + ('name', ct.c_char * 32), + ('width', ct.c_uint16), + ('height', ct.c_uint16), + ('padding_right', ct.c_uint16), + ('padding_down', ct.c_uint16), + ('dummy', ct.c_uint32 * 6), + ('transform', ct.c_uint16), + ('format', ct.c_uint16), + ('bayer_order', ct.c_uint8), + ('bayer_format', ct.c_uint8), + ] + + +class PiBayerArray(PiArrayOutput): + """ + Produces a 3-dimensional RGB array from raw Bayer data. + + This custom output class is intended to be used with the + :meth:`~picamera.PiCamera.capture` method, with the *bayer* parameter set + to ``True``, to include raw Bayer data in the JPEG output. The class + strips out the raw data, and constructs a numpy array from it. The + resulting data is accessed via the :attr:`~PiArrayOutput.array` attribute:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + with picamera.array.PiBayerArray(camera) as output: + camera.capture(output, 'jpeg', bayer=True) + print(output.array.shape) + + The *output_dims* parameter specifies whether the resulting array is + three-dimensional (the default, or when *output_dims* is 3), or + two-dimensional (when *output_dims* is 2). The three-dimensional data is + already separated into the three color planes, whilst the two-dimensional + variant is not (in which case you need to know the Bayer ordering to + accurately deal with the results). + + .. note:: + + Bayer data is *usually* full resolution, so the resulting array usually + has the shape (1944, 2592, 3) with the V1 module, or (2464, 3280, 3) + with the V2 module (if two-dimensional output is requested the + 3-layered color dimension is omitted). If the camera's + :attr:`~picamera.PiCamera.sensor_mode` has been forced to something + other than 0, then the output will be the native size for the requested + sensor mode. + + This also implies that the optional *size* parameter (for specifying a + resizer resolution) is not available with this array class. + + As the sensor records 10-bit values, the array uses the unsigned 16-bit + integer data type. + + By default, `de-mosaicing`_ is **not** performed; if the resulting array is + viewed it will therefore appear dark and too green (due to the green bias + in the `Bayer pattern`_). 
A trivial weighted-average demosaicing algorithm + is provided in the :meth:`demosaic` method:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + with picamera.array.PiBayerArray(camera) as output: + camera.capture(output, 'jpeg', bayer=True) + print(output.demosaic().shape) + + Viewing the result of the de-mosaiced data will look more normal but still + considerably worse quality than the regular camera output (as none of the + other usual post-processing steps like auto-exposure, white-balance, + vignette compensation, and smoothing have been performed). + + .. versionchanged:: 1.13 + This class now supports the V2 module properly, and handles flipped + images, and forced sensor modes correctly. + + .. _de-mosaicing: https://en.wikipedia.org/wiki/Demosaicing + .. _Bayer pattern: https://en.wikipedia.org/wiki/Bayer_filter + """ + BAYER_OFFSETS = { + 0: ((0, 0), (1, 0), (0, 1), (1, 1)), + 1: ((1, 0), (0, 0), (1, 1), (0, 1)), + 2: ((1, 1), (0, 1), (1, 0), (0, 0)), + 3: ((0, 1), (1, 1), (0, 0), (1, 0)), + } + + def __init__(self, camera, output_dims=3): + super(PiBayerArray, self).__init__(camera, size=None) + if not (2 <= output_dims <= 3): + raise PiCameraValueError('output_dims must be 2 or 3') + self._demo = None + self._header = None + self._output_dims = output_dims + + @property + def output_dims(self): + return self._output_dims + + def _to_3d(self, array): + array_3d = np.zeros(array.shape + (3,), dtype=array.dtype) + ( + (ry, rx), (gy, gx), (Gy, Gx), (by, bx) + ) = PiBayerArray.BAYER_OFFSETS[self._header.bayer_order] + array_3d[ry::2, rx::2, 0] = array[ry::2, rx::2] # Red + array_3d[gy::2, gx::2, 1] = array[gy::2, gx::2] # Green + array_3d[Gy::2, Gx::2, 1] = array[Gy::2, Gx::2] # Green + array_3d[by::2, bx::2, 2] = array[by::2, bx::2] # Blue + return array_3d + + def flush(self): + super(PiBayerArray, self).flush() + self._demo = None + offset = { + 'OV5647': { + 0: 6404096, + 1: 2717696, + 2: 6404096, + 3: 6404096, + 4: 1625600, + 5: 1233920, + 6: 445440, + 7: 445440, + }, + 'IMX219': { + 0: 10270208, + 1: 2678784, + 2: 10270208, + 3: 10270208, + 4: 2628608, + 5: 1963008, + 6: 1233920, + 7: 445440, + }, + }[self.camera.revision.upper()][self.camera.sensor_mode] + data = self.getvalue()[-offset:] + if data[:4] != b'BRCM': + raise PiCameraValueError('Unable to locate Bayer data at end of buffer') + # Extract header (with bayer order and other interesting bits), which + # is 176 bytes from start of bayer data, and pixel data which 32768 + # bytes from start of bayer data + self._header = BroadcomRawHeader.from_buffer_copy( + data[176:176 + ct.sizeof(BroadcomRawHeader)]) + data = np.frombuffer(data, dtype=np.uint8, offset=32768) + # Reshape and crop the data. 
The crop's width is multiplied by 5/4 to + # deal with the packed 10-bit format; the shape's width is calculated + # in a similar fashion but with padding included (which involves + # several additional padding steps) + crop = mo.PiResolution( + self._header.width * 5 // 4, + self._header.height) + shape = mo.PiResolution( + (((self._header.width + self._header.padding_right) * 5) + 3) // 4, + (self._header.height + self._header.padding_down) + ).pad() + data = data.reshape((shape.height, shape.width))[:crop.height, :crop.width] + # Unpack 10-bit values; every 5 bytes contains the high 8-bits of 4 + # values followed by the low 2-bits of 4 values packed into the fifth + # byte + data = data.astype(np.uint16) << 2 + for byte in range(4): + data[:, byte::5] |= ((data[:, 4::5] >> (byte * 2)) & 3) + self.array = np.zeros( + (data.shape[0], data.shape[1] * 4 // 5), dtype=np.uint16) + for i in range(4): + self.array[:, i::4] = data[:, i::5] + if self.output_dims == 3: + self.array = self._to_3d(self.array) + + def demosaic(self): + """ + Perform a rudimentary `de-mosaic`_ of ``self.array``, returning the + result as a new array. The result of the demosaic is *always* three + dimensional, with the last dimension being the color planes (see + *output_dims* parameter on the constructor). + + .. _de-mosaic: https://en.wikipedia.org/wiki/Demosaicing + """ + if self._demo is None: + # Construct 3D representation of Bayer data (if necessary) + if self.output_dims == 2: + array_3d = self._to_3d(self.array) + else: + array_3d = self.array + # Construct representation of the bayer pattern + bayer = np.zeros(array_3d.shape, dtype=np.uint8) + ( + (ry, rx), (gy, gx), (Gy, Gx), (by, bx) + ) = PiBayerArray.BAYER_OFFSETS[self._header.bayer_order] + bayer[ry::2, rx::2, 0] = 1 # Red + bayer[gy::2, gx::2, 1] = 1 # Green + bayer[Gy::2, Gx::2, 1] = 1 # Green + bayer[by::2, bx::2, 2] = 1 # Blue + # Allocate output array with same shape as data and set up some + # constants to represent the weighted average window + window = (3, 3) + borders = (window[0] - 1, window[1] - 1) + border = (borders[0] // 2, borders[1] // 2) + # Pad out the data and the bayer pattern (np.pad is faster but + # unavailable on the version of numpy shipped with Raspbian at the + # time of writing) + rgb = np.zeros(( + array_3d.shape[0] + borders[0], + array_3d.shape[1] + borders[1], + array_3d.shape[2]), dtype=array_3d.dtype) + rgb[ + border[0]:rgb.shape[0] - border[0], + border[1]:rgb.shape[1] - border[1], + :] = array_3d + bayer_pad = np.zeros(( + array_3d.shape[0] + borders[0], + array_3d.shape[1] + borders[1], + array_3d.shape[2]), dtype=bayer.dtype) + bayer_pad[ + border[0]:bayer_pad.shape[0] - border[0], + border[1]:bayer_pad.shape[1] - border[1], + :] = bayer + bayer = bayer_pad + # For each plane in the RGB data, construct a view over the plane + # of 3x3 matrices. 
Then do the same for the bayer array and use + # Einstein summation to get the weighted average + self._demo = np.empty(array_3d.shape, dtype=array_3d.dtype) + for plane in range(3): + p = rgb[..., plane] + b = bayer[..., plane] + pview = as_strided(p, shape=( + p.shape[0] - borders[0], + p.shape[1] - borders[1]) + window, strides=p.strides * 2) + bview = as_strided(b, shape=( + b.shape[0] - borders[0], + b.shape[1] - borders[1]) + window, strides=b.strides * 2) + psum = np.einsum('ijkl->ij', pview) + bsum = np.einsum('ijkl->ij', bview) + self._demo[..., plane] = psum // bsum + return self._demo + + +class PiMotionArray(PiArrayOutput): + """ + Produces a 3-dimensional array of motion vectors from the H.264 encoder. + + This custom output class is intended to be used with the *motion_output* + parameter of the :meth:`~picamera.PiCamera.start_recording` method. Once + recording has finished, the class generates a 3-dimensional numpy array + organized as (frames, rows, columns) where ``rows`` and ``columns`` are the + number of rows and columns of `macro-blocks`_ (16x16 pixel blocks) in the + original frames. There is always one extra column of macro-blocks present + in motion vector data. + + The data-type of the :attr:`~PiArrayOutput.array` is an (x, y, sad) + structure where ``x`` and ``y`` are signed 1-byte values, and ``sad`` is an + unsigned 2-byte value representing the `sum of absolute differences`_ of + the block. For example:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + with picamera.array.PiMotionArray(camera) as output: + camera.resolution = (640, 480) + camera.start_recording( + '/dev/null', format='h264', motion_output=output) + camera.wait_recording(30) + camera.stop_recording() + print('Captured %d frames' % output.array.shape[0]) + print('Frames are %dx%d blocks big' % ( + output.array.shape[2], output.array.shape[1])) + + If you are using the GPU resizer with your recording, use the optional + *size* parameter to specify the resizer's output resolution when + constructing the array:: + + import picamera + import picamera.array + + with picamera.PiCamera() as camera: + camera.resolution = (640, 480) + with picamera.array.PiMotionArray(camera, size=(320, 240)) as output: + camera.start_recording( + '/dev/null', format='h264', motion_output=output, + resize=(320, 240)) + camera.wait_recording(30) + camera.stop_recording() + print('Captured %d frames' % output.array.shape[0]) + print('Frames are %dx%d blocks big' % ( + output.array.shape[2], output.array.shape[1])) + + .. note:: + + This class is not suitable for real-time analysis of motion vector + data. See the :class:`PiMotionAnalysis` class instead. + + .. _macro-blocks: https://en.wikipedia.org/wiki/Macroblock + .. _sum of absolute differences: https://en.wikipedia.org/wiki/Sum_of_absolute_differences + """ + + def flush(self): + super(PiMotionArray, self).flush() + width, height = self.size or self.camera.resolution + cols = ((width + 15) // 16) + 1 + rows = (height + 15) // 16 + b = self.getvalue() + frames = len(b) // (cols * rows * motion_dtype.itemsize) + self.array = np.frombuffer(b, dtype=motion_dtype).reshape((frames, rows, cols)) + + +class PiAnalysisOutput(io.IOBase): + """ + Base class for analysis outputs. + + This class extends :class:`io.IOBase` with a stub :meth:`analyze` method + which will be called for each frame output. In this base implementation the + method simply raises :exc:`NotImplementedError`. 
+ """ + + def __init__(self, camera, size=None): + super(PiAnalysisOutput, self).__init__() + self.camera = camera + self.size = size + + def writable(self): + return True + + def write(self, b): + return len(b) + + def analyze(self, array): + """ + Stub method for users to override. + """ + try: + self.analyse(array) + warnings.warn( + PiCameraDeprecated( + 'The analyse method is deprecated; use analyze (US ' + 'English spelling) instead')) + except NotImplementedError: + raise + + def analyse(self, array): + """ + Deprecated alias of :meth:`analyze`. + """ + raise NotImplementedError + + +class PiRGBAnalysis(PiAnalysisOutput): + """ + Provides a basis for per-frame RGB analysis classes. + + This custom output class is intended to be used with the + :meth:`~picamera.PiCamera.start_recording` method when it is called with + *format* set to ``'rgb'`` or ``'bgr'``. While recording is in progress, the + :meth:`~PiAnalysisOutput.write` method converts incoming frame data into a + numpy array and calls the stub :meth:`~PiAnalysisOutput.analyze` method + with the resulting array (this deliberately raises + :exc:`NotImplementedError` in this class; you must override it in your + descendent class). + + .. note:: + + If your overridden :meth:`~PiAnalysisOutput.analyze` method runs slower + than the required framerate (e.g. 33.333ms when framerate is 30fps) + then the camera's effective framerate will be reduced. Furthermore, + this doesn't take into account the overhead of picamera itself so in + practice your method needs to be a bit faster still. + + The array passed to :meth:`~PiAnalysisOutput.analyze` is organized as + (rows, columns, channel) where the channels 0, 1, and 2 are R, G, and B + respectively (or B, G, R if *format* is ``'bgr'``). + """ + + def write(self, b): + result = super(PiRGBAnalysis, self).write(b) + self.analyze(bytes_to_rgb(b, self.size or self.camera.resolution)) + return result + + +class PiYUVAnalysis(PiAnalysisOutput): + """ + Provides a basis for per-frame YUV analysis classes. + + This custom output class is intended to be used with the + :meth:`~picamera.PiCamera.start_recording` method when it is called with + *format* set to ``'yuv'``. While recording is in progress, the + :meth:`~PiAnalysisOutput.write` method converts incoming frame data into a + numpy array and calls the stub :meth:`~PiAnalysisOutput.analyze` method + with the resulting array (this deliberately raises + :exc:`NotImplementedError` in this class; you must override it in your + descendent class). + + .. note:: + + If your overridden :meth:`~PiAnalysisOutput.analyze` method runs slower + than the required framerate (e.g. 33.333ms when framerate is 30fps) + then the camera's effective framerate will be reduced. Furthermore, + this doesn't take into account the overhead of picamera itself so in + practice your method needs to be a bit faster still. + + The array passed to :meth:`~PiAnalysisOutput.analyze` is organized as + (rows, columns, channel) where the channel 0 is Y (luminance), while 1 and + 2 are U and V (chrominance) respectively. The chrominance values normally + have quarter resolution of the luminance values but this class makes all + channels equal resolution for ease of use. + """ + + def write(self, b): + result = super(PiYUVAnalysis, self).write(b) + self.analyze(bytes_to_yuv(b, self.size or self.camera.resolution)) + return result + + +class PiMotionAnalysis(PiAnalysisOutput): + """ + Provides a basis for real-time motion analysis classes. 
+ + This custom output class is intended to be used with the *motion_output* + parameter of the :meth:`~picamera.PiCamera.start_recording` method. While + recording is in progress, the write method converts incoming motion data + into numpy arrays and calls the stub :meth:`~PiAnalysisOutput.analyze` + method with the resulting array (which deliberately raises + :exc:`NotImplementedError` in this class). + + .. note:: + + If your overridden :meth:`~PiAnalysisOutput.analyze` method runs slower + than the required framerate (e.g. 33.333ms when framerate is 30fps) + then the camera's effective framerate will be reduced. Furthermore, + this doesn't take into account the overhead of picamera itself so in + practice your method needs to be a bit faster still. + + The array passed to :meth:`~PiAnalysisOutput.analyze` is organized as + (rows, columns) where ``rows`` and ``columns`` are the number of rows and + columns of `macro-blocks`_ (16x16 pixel blocks) in the original frames. + There is always one extra column of macro-blocks present in motion vector + data. + + The data-type of the array is an (x, y, sad) structure where ``x`` and + ``y`` are signed 1-byte values, and ``sad`` is an unsigned 2-byte value + representing the `sum of absolute differences`_ of the block. + + An example of a crude motion detector is given below:: + + import numpy as np + import picamera + import picamera.array + + class DetectMotion(picamera.array.PiMotionAnalysis): + def analyze(self, a): + a = np.sqrt( + np.square(a['x'].astype(np.float)) + + np.square(a['y'].astype(np.float)) + ).clip(0, 255).astype(np.uint8) + # If there're more than 10 vectors with a magnitude greater + # than 60, then say we've detected motion + if (a > 60).sum() > 10: + print('Motion detected!') + + with picamera.PiCamera() as camera: + with DetectMotion(camera) as output: + camera.resolution = (640, 480) + camera.start_recording( + '/dev/null', format='h264', motion_output=output) + camera.wait_recording(30) + camera.stop_recording() + + You can use the optional *size* parameter to specify the output resolution + of the GPU resizer, if you are using the *resize* parameter of + :meth:`~picamera.PiCamera.start_recording`. 
+ """ + + def __init__(self, camera, size=None): + super(PiMotionAnalysis, self).__init__(camera, size) + self.cols = None + self.rows = None + + def write(self, b): + result = super(PiMotionAnalysis, self).write(b) + if self.cols is None: + width, height = self.size or self.camera.resolution + self.cols = ((width + 15) // 16) + 1 + self.rows = (height + 15) // 16 + self.analyze( + np.frombuffer(b, dtype=motion_dtype).\ + reshape((self.rows, self.cols))) + return result + + +class MMALArrayBuffer(mo.MMALBuffer): + __slots__ = ('_shape',) + + def __init__(self, port, buf): + super(MMALArrayBuffer, self).__init__(buf) + width = port._format[0].es[0].video.width + height = port._format[0].es[0].video.height + bpp = self.size // (width * height) + self.offset = 0 + self.length = width * height * bpp + self._shape = (height, width, bpp) + + def __enter__(self): + mmal_check( + mmal.mmal_buffer_header_mem_lock(self._buf), + prefix='unable to lock buffer header memory') + assert self.offset == 0 + return np.frombuffer( + ct.cast( + self._buf[0].data, + ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents, + dtype=np.uint8, count=self.length).reshape(self._shape) + + def __exit__(self, *exc): + mmal.mmal_buffer_header_mem_unlock(self._buf) + return False + + +class PiArrayTransform(mo.MMALPythonComponent): + """ + A derivative of :class:`~picamera.mmalobj.MMALPythonComponent` which eases + the construction of custom MMAL transforms by representing buffer data as + numpy arrays. The *formats* parameter specifies the accepted input + formats as a sequence of strings (default: 'rgb', 'bgr', 'rgba', 'bgra'). + + Override the :meth:`transform` method to modify buffers sent to the + component, then place it in your MMAL pipeline as you would a normal + encoder. + """ + __slots__ = () + + def __init__(self, formats=('rgb', 'bgr', 'rgba', 'bgra')): + super(PiArrayTransform, self).__init__() + if isinstance(formats, bytes): + formats = formats.decode('ascii') + if isinstance(formats, str): + formats = (formats,) + try: + formats = { + { + 'rgb': mmal.MMAL_ENCODING_RGB24, + 'bgr': mmal.MMAL_ENCODING_BGR24, + 'rgba': mmal.MMAL_ENCODING_RGBA, + 'bgra': mmal.MMAL_ENCODING_BGRA, + }[fmt] + for fmt in formats + } + except KeyError as e: + raise PiCameraValueError( + 'PiArrayTransform cannot handle format %s' % str(e)) + self.inputs[0].supported_formats = formats + self.outputs[0].supported_formats = formats + + def _callback(self, port, source_buf): + try: + target_buf = self.outputs[0].get_buffer(False) + except PiCameraPortDisabled: + return False + if target_buf: + target_buf.copy_meta(source_buf) + result = self.transform( + MMALArrayBuffer(port, source_buf._buf), + MMALArrayBuffer(self.outputs[0], target_buf._buf)) + try: + self.outputs[0].send_buffer(target_buf) + except PiCameraPortDisabled: + return False + return False + + def transform(self, source, target): + """ + This method will be called for every frame passing through the + transform. The *source* and *target* parameters represent buffers from + the input and output ports of the transform respectively. They will be + derivatives of :class:`~picamera.mmalobj.MMALBuffer` which return a + 3-dimensional numpy array when used as context managers. For example:: + + def transform(self, source, target): + with source as source_array, target as target_array: + # Copy the source array data to the target + target_array[...] 
= source_array + # Draw a box around the edges + target_array[0, :, :] = 0xff + target_array[-1, :, :] = 0xff + target_array[:, 0, :] = 0xff + target_array[:, -1, :] = 0xff + return False + + The target buffer's meta-data starts out as a copy of the source + buffer's meta-data, but the target buffer's data starts out + uninitialized. + """ + return False diff --git a/picamera/bcm_host.py b/picamera/bcm_host.py new file mode 100644 index 0000000..7ecd362 --- /dev/null +++ b/picamera/bcm_host.py @@ -0,0 +1,991 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python header conversion +# Copyright (c) 2013-2017 Dave Jones +# +# Original headers +# Copyright (c) 2012, Broadcom Europe Ltd +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + +import ctypes as ct +import warnings + +_lib = ct.CDLL('libbcm_host.so') + +# bcm_host.h ################################################################# + +bcm_host_init = _lib.bcm_host_init +bcm_host_init.argtypes = [] +bcm_host_init.restype = None + +bcm_host_deinit = _lib.bcm_host_deinit +bcm_host_deinit.argtypes = [] +bcm_host_deinit.restype = None + +graphics_get_display_size = _lib.graphics_get_display_size +graphics_get_display_size.argtypes = [ct.c_uint16, ct.POINTER(ct.c_uint32), ct.POINTER(ct.c_uint32)] +graphics_get_display_size.restype = ct.c_int32 + +# vchi.h ##################################################################### + +VCHI_INSTANCE_T = ct.c_void_p +VCHI_CONNECTION_T = ct.c_void_p + +# vcos_platform.h ############################################################ + +VCOS_UNSIGNED = ct.c_uint32 + +# vcos_types.h ############################################################### + +VCOS_STATUS_T = ct.c_uint32 # enum +( + VCOS_SUCCESS, + VCOS_EAGAIN, + VCOS_ENOENT, + VCOS_ENOSPC, + VCOS_EINVAL, + VCOS_EACCESS, + VCOS_ENOMEM, + VCOS_ENOSYS, + VCOS_EEXIST, + VCOS_ENXIO, + VCOS_EINTR, +) = range(11) + +vcos_bool_t = ct.c_int32 +vcos_fourcc_t = ct.c_int32 + +def VCOS_ALIGN_UP(value, round_to): + # Note: this function assumes round_to is some power of 2. + return (value + (round_to - 1)) & ~(round_to - 1) + +def VCOS_ALIGN_DOWN(value, round_to): + # Note: this function assumes round_to is some power of 2. + return value & ~(round_to - 1) + +# vc_image_types.h ########################################################### + +class VC_RECT_T(ct.Structure): + _fields_ = [ + ('x', ct.c_int32), + ('y', ct.c_int32), + ('width', ct.c_int32), + ('height', ct.c_int32), + ] + +VC_IMAGE_TYPE_T = ct.c_uint32 # enum +( + VC_IMAGE_MIN, + VC_IMAGE_RGB565, + VC_IMAGE_1BPP, + VC_IMAGE_YUV420, + VC_IMAGE_48BPP, + VC_IMAGE_RGB888, + VC_IMAGE_8BPP, + VC_IMAGE_4BPP, + VC_IMAGE_3D32, + VC_IMAGE_3D32B, + VC_IMAGE_3D32MAT, + VC_IMAGE_RGB2X9, + VC_IMAGE_RGB666, + VC_IMAGE_PAL4_OBSOLETE, + VC_IMAGE_PAL8_OBSOLETE, + VC_IMAGE_RGBA32, + VC_IMAGE_YUV422, + VC_IMAGE_RGBA565, + VC_IMAGE_RGBA16, + VC_IMAGE_YUV_UV, + VC_IMAGE_TF_RGBA32, + VC_IMAGE_TF_RGBX32, + VC_IMAGE_TF_FLOAT, + VC_IMAGE_TF_RGBA16, + VC_IMAGE_TF_RGBA5551, + VC_IMAGE_TF_RGB565, + VC_IMAGE_TF_YA88, + VC_IMAGE_TF_BYTE, + VC_IMAGE_TF_PAL8, + VC_IMAGE_TF_PAL4, + VC_IMAGE_TF_ETC1, + VC_IMAGE_BGR888, + VC_IMAGE_BGR888_NP, + VC_IMAGE_BAYER, + VC_IMAGE_CODEC, + VC_IMAGE_YUV_UV32, + VC_IMAGE_TF_Y8, + VC_IMAGE_TF_A8, + VC_IMAGE_TF_SHORT, + VC_IMAGE_TF_1BPP, + VC_IMAGE_OPENGL, + VC_IMAGE_YUV444I, + VC_IMAGE_YUV422PLANAR, + VC_IMAGE_ARGB8888, + VC_IMAGE_XRGB8888, + VC_IMAGE_YUV422YUYV, + VC_IMAGE_YUV422YVYU, + VC_IMAGE_YUV422UYVY, + VC_IMAGE_YUV422VYUY, + VC_IMAGE_RGBX32, + VC_IMAGE_RGBX8888, + VC_IMAGE_BGRX8888, + VC_IMAGE_YUV420SP, + VC_IMAGE_YUV444PLANAR, + VC_IMAGE_TF_U8, + VC_IMAGE_TF_V8, + VC_IMAGE_MAX, +) = range(57) + +TRANSFORM_HFLIP = 1 << 0 +TRANSFORM_VFLIP = 1 << 1 +TRANSFORM_TRANSPOSE = 1 << 2 + +VC_IMAGE_TRANSFORM_T = ct.c_uint32 # enum +VC_IMAGE_ROT0 = 0 +VC_IMAGE_MIRROR_ROT0 = TRANSFORM_HFLIP +VC_IMAGE_MIRROR_ROT180 = TRANSFORM_VFLIP +VC_IMAGE_ROT180 = TRANSFORM_HFLIP | TRANSFORM_VFLIP +VC_IMAGE_MIRROR_ROT90 = TRANSFORM_TRANSPOSE +VC_IMAGE_ROT270 = TRANSFORM_TRANSPOSE | TRANSFORM_HFLIP +VC_IMAGE_ROT90 = TRANSFORM_TRANSPOSE | TRANSFORM_VFLIP +VC_IMAGE_MIRROR_ROT270 = 
TRANSFORM_TRANSPOSE | TRANSFORM_HFLIP | TRANSFORM_VFLIP + +VC_IMAGE_BAYER_ORDER_T = ct.c_uint32 # enum +( + VC_IMAGE_BAYER_RGGB, + VC_IMAGE_BAYER_GBRG, + VC_IMAGE_BAYER_BGGR, + VC_IMAGE_BAYER_GRBG, +) = range(4) + +VC_IMAGE_BAYER_FORMAT_T = ct.c_uint32 # enum +( + VC_IMAGE_BAYER_RAW6, + VC_IMAGE_BAYER_RAW7, + VC_IMAGE_BAYER_RAW8, + VC_IMAGE_BAYER_RAW10, + VC_IMAGE_BAYER_RAW12, + VC_IMAGE_BAYER_RAW14, + VC_IMAGE_BAYER_RAW16, + VC_IMAGE_BAYER_RAW10_8, + VC_IMAGE_BAYER_RAW12_8, + VC_IMAGE_BAYER_RAW14_8, + VC_IMAGE_BAYER_RAW10L, + VC_IMAGE_BAYER_RAW12L, + VC_IMAGE_BAYER_RAW14L, + VC_IMAGE_BAYER_RAW16_BIG_ENDIAN, + VC_IMAGE_BAYER_RAW4, +) = range(15) + +# vc_display_types.h ######################################################### + +VCOS_DISPLAY_INPUT_FORMAT_T = ct.c_uint32 # enum +( + VCOS_DISPLAY_INPUT_FORMAT_INVALID, + VCOS_DISPLAY_INPUT_FORMAT_RGB888, + VCOS_DISPLAY_INPUT_FORMAT_RGB565 +) = range(3) + +DISPLAY_INPUT_FORMAT_INVALID = VCOS_DISPLAY_INPUT_FORMAT_INVALID +DISPLAY_INPUT_FORMAT_RGB888 = VCOS_DISPLAY_INPUT_FORMAT_RGB888 +DISPLAY_INPUT_FORMAT_RGB565 = VCOS_DISPLAY_INPUT_FORMAT_RGB565 +DISPLAY_INPUT_FORMAT_T = VCOS_DISPLAY_INPUT_FORMAT_T + +DISPLAY_3D_FORMAT_T = ct.c_uint32 # enum +( + DISPLAY_3D_UNSUPPORTED, + DISPLAY_3D_INTERLEAVED, + DISPLAY_3D_SBS_FULL_AUTO, + DISPLAY_3D_SBS_HALF_HORIZ, + DISPLAY_3D_TB_HALF, + DISPLAY_3D_FRAME_PACKING, + DISPLAY_3D_FRAME_SEQUENTIAL, + DISPLAY_3D_FORMAT_MAX, +) = range(8) + +DISPLAY_INTERFACE_T = ct.c_uint32 # enum +( + DISPLAY_INTERFACE_MIN, + DISPLAY_INTERFACE_SMI, + DISPLAY_INTERFACE_DPI, + DISPLAY_INTERFACE_DSI, + DISPLAY_INTERFACE_LVDS, + DISPLAY_INTERFACE_MAX, +) = range(6) + +DISPLAY_DITHER_T = ct.c_uint32 # enum +( + DISPLAY_DITHER_NONE, + DISPLAY_DITHER_RGB666, + DISPLAY_DITHER_RGB565, + DISPLAY_DITHER_RGB555, + DISPLAY_DITHER_MAX, +) = range(5) + +class DISPLAY_INFO_T(ct.Structure): + _fields_ = [ + ('type', DISPLAY_INTERFACE_T), + ('width', ct.c_uint32), + ('height', ct.c_uint32), + ('input_format', DISPLAY_INPUT_FORMAT_T), + ('interlaced', ct.c_uint32), + ('output_dither', DISPLAY_DITHER_T), + ('pixel_freq', ct.c_uint32), + ('line_rate', ct.c_uint32), + ('format_3d', DISPLAY_3D_FORMAT_T), + ('use_pixelvalve_1', ct.c_uint32), + ('dsi_video_mode', ct.c_uint32), + ('hvs_channel', ct.c_uint32), + ] + +# vc_dispmanx_types.h ######################################################## + +DISPMANX_DISPLAY_HANDLE_T = ct.c_uint32 +DISPMANX_UPDATE_HANDLE_T = ct.c_uint32 +DISPMANX_ELEMENT_HANDLE_T = ct.c_uint32 +DISPMANX_RESOURCE_HANDLE_T = ct.c_uint32 +DISPMANX_PROTECTION_T = ct.c_uint32 + +DISPMANX_TRANSFORM_T = ct.c_uint32 # enum +DISPMANX_NO_ROTATE = 0 +DISPMANX_ROTATE_90 = 1 +DISPMANX_ROTATE_180 = 2 +DISPMANX_ROTATE_270 = 3 +DISPMANX_FLIP_HRIZ = 1 << 16 +DISPMANX_FLIP_VERT = 1 << 17 +DISPMANX_STEREOSCOPIC_INVERT = 1 << 19 +DISPMANX_STEREOSCOPIC_NONE = 0 << 20 +DISPMANX_STEREOSCOPIC_MONO = 1 << 20 +DISPMANX_STEREOSCOPIC_SBS = 2 << 20 +DISPMANX_STEREOSCOPIC_TB = 3 << 20 +DISPMANX_STEREOSCOPIC_MASK = 15 << 20 +DISPMANX_SNAPSHOT_NO_YUV = 1 << 24 +DISPMANX_SNAPSHOT_NO_RGB = 1 << 25 +DISPMANX_SNAPSHOT_FILL = 1 << 26 +DISPMANX_SNAPSHOT_SWAP_RED_BLUE = 1 << 27 +DISPMANX_SNAPSHOT_PACK = 1 << 28 + +DISPMANX_FLAGS_ALPHA_T = ct.c_uint32 # enum +DISPMANX_FLAGS_ALPHA_FROM_SOURCE = 0 +DISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS = 1 +DISPMANX_FLAGS_ALPHA_FIXED_NON_ZERO = 2 +DISPMANX_FLAGS_ALPHA_FIXED_EXCEED_0X07 = 3 +DISPMANX_FLAGS_ALPHA_PREMULT = 1 << 16 +DISPMANX_FLAGS_ALPHA_MIX = 1 << 17 + +class DISPMANX_ALPHA_T(ct.Structure): + _fields_ = [ + ('flags', 
DISPMANX_FLAGS_ALPHA_T), + ('opacity', ct.c_uint32), + ('mask', ct.c_void_p), + ] + +class VC_DISPMANX_ALPHA_T(ct.Structure): + _fields_ = [ + ('flags', DISPMANX_FLAGS_ALPHA_T), + ('opacity', ct.c_uint32), + ('mask', DISPMANX_RESOURCE_HANDLE_T), + ] + +DISPMANX_FLAGS_CLAMP_T = ct.c_uint32 # enum +( + DISPMANX_FLAGS_CLAMP_NONE, + DISPMANX_FLAGS_CLAMP_LUMA_TRANSPARENT, + DISPMANX_FLAGS_CLAMP_TRANSPARENT, + DISPMANX_FLAGS_CLAMP_REPLACE, +) = range(4) + +DISPMANX_FLAGS_KEYMASK_T = ct.c_uint32 # enum +DISPMANX_FLAGS_KEYMASK_OVERRIDE = 1 +DISPMANX_FLAGS_KEYMASK_SMOOTH = 1 << 1 +DISPMANX_FLAGS_KEYMASK_CR_INV = 1 << 2 +DISPMANX_FLAGS_KEYMASK_CB_INV = 1 << 3 +DISPMANX_FLAGS_KEYMASK_YY_INV = 1 << 4 + +class _YUV(ct.Structure): + _fields_ = [ + ('yy_upper', ct.c_uint8), + ('yy_lower', ct.c_uint8), + ('cr_upper', ct.c_uint8), + ('cr_lower', ct.c_uint8), + ('cb_upper', ct.c_uint8), + ('cb_lower', ct.c_uint8), + ] + +class _RGB(ct.Structure): + _fields_ = [ + ('red_upper', ct.c_uint8), + ('red_lower', ct.c_uint8), + ('green_upper', ct.c_uint8), + ('green_lower', ct.c_uint8), + ('blue_upper', ct.c_uint8), + ('blue_lower', ct.c_uint8), + ] + +class DISPMANX_CLAMP_KEYS_T(ct.Union): + _fields_ = [ + ('yuv', _YUV), + ('rgb', _RGB), + ] + +class DISPMANX_CLAMP_T(ct.Structure): + _fields_ = [ + ('mode', DISPMANX_FLAGS_CLAMP_T), + ('key_mask', DISPMANX_FLAGS_KEYMASK_T), + ('key_value', DISPMANX_CLAMP_KEYS_T), + ('replace_value', ct.c_uint32), + ] + +class DISPMANX_MODEINFO_T(ct.Structure): + _fields_ = [ + ('width', ct.c_int32), + ('height', ct.c_int32), + ('transform', DISPMANX_TRANSFORM_T), + ('input_format', DISPLAY_INPUT_FORMAT_T), + ('display_num', ct.c_uint32), + ] + +DISPMANX_CALLBACK_FUNC_T = ct.CFUNCTYPE( + None, + DISPMANX_UPDATE_HANDLE_T, ct.c_void_p) + +DISPMANX_PROGRESS_CALLBACK_FUNC_T = ct.CFUNCTYPE( + None, + DISPMANX_UPDATE_HANDLE_T, ct.c_uint32, ct.c_void_p) + +# vc_dispmanx.h ############################################################## + +vc_dispmanx_stop = _lib.vc_dispmanx_stop +vc_dispmanx_stop.argtypes = [] +vc_dispmanx_stop.restype = None + +vc_dispmanx_rect_set = _lib.vc_dispmanx_rect_set +vc_dispmanx_rect_set.argtypes = [ct.POINTER(VC_RECT_T), ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32] +vc_dispmanx_rect_set.restype = ct.c_int + +vc_dispmanx_resource_create = _lib.vc_dispmanx_resource_create +vc_dispmanx_resource_create.argtypes = [VC_IMAGE_TYPE_T, ct.c_uint32, ct.c_uint32, ct.POINTER(ct.c_uint32)] +vc_dispmanx_resource_create.restype = DISPMANX_RESOURCE_HANDLE_T + +vc_dispmanx_resource_write_data = _lib.vc_dispmanx_resource_write_data +vc_dispmanx_resource_write_data.argtypes = [DISPMANX_RESOURCE_HANDLE_T, VC_IMAGE_TYPE_T, ct.c_int, ct.c_void_p, ct.POINTER(VC_RECT_T)] +vc_dispmanx_resource_write_data.restype = ct.c_int + +vc_dispmanx_resource_read_data = _lib.vc_dispmanx_resource_read_data +vc_dispmanx_resource_read_data.argtypes = [DISPMANX_RESOURCE_HANDLE_T, ct.POINTER(VC_RECT_T), ct.c_void_p, ct.c_uint32] +vc_dispmanx_resource_read_data.restype = ct.c_int + +vc_dispmanx_resource_delete = _lib.vc_dispmanx_resource_delete +vc_dispmanx_resource_delete.argtypes = [DISPMANX_RESOURCE_HANDLE_T] +vc_dispmanx_resource_delete.restype = ct.c_int + +vc_dispmanx_display_open = _lib.vc_dispmanx_display_open +vc_dispmanx_display_open.argtypes = [ct.c_uint32] +vc_dispmanx_display_open.restype = DISPMANX_DISPLAY_HANDLE_T + +vc_dispmanx_display_open_mode = _lib.vc_dispmanx_display_open_mode +vc_dispmanx_display_open_mode.argtypes = [ct.c_uint32, ct.c_uint32] 
+vc_dispmanx_display_open_mode.restype = DISPMANX_DISPLAY_HANDLE_T + +vc_dispmanx_display_open_offscreen = _lib.vc_dispmanx_display_open_offscreen +vc_dispmanx_display_open_offscreen.argtypes = [DISPMANX_RESOURCE_HANDLE_T, DISPMANX_TRANSFORM_T] +vc_dispmanx_display_open_offscreen.restype = DISPMANX_DISPLAY_HANDLE_T + +vc_dispmanx_display_reconfigure = _lib.vc_dispmanx_display_reconfigure +vc_dispmanx_display_reconfigure.argtypes = [DISPMANX_DISPLAY_HANDLE_T, ct.c_uint32] +vc_dispmanx_display_reconfigure.restype = ct.c_int + +vc_dispmanx_display_set_destination = _lib.vc_dispmanx_display_set_destination +vc_dispmanx_display_set_destination.argtypes = [DISPMANX_DISPLAY_HANDLE_T, DISPMANX_RESOURCE_HANDLE_T] +vc_dispmanx_display_set_destination.restype = ct.c_int + +vc_dispmanx_display_set_background = _lib.vc_dispmanx_display_set_background +vc_dispmanx_display_set_background.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_DISPLAY_HANDLE_T, ct.c_uint8, ct.c_uint8, ct.c_uint8] +vc_dispmanx_display_set_background.restype = ct.c_int + +vc_dispmanx_display_get_info = _lib.vc_dispmanx_display_get_info +vc_dispmanx_display_get_info.argtypes = [DISPMANX_DISPLAY_HANDLE_T, ct.POINTER(DISPMANX_MODEINFO_T)] +vc_dispmanx_display_get_info.restype = ct.c_int + +vc_dispmanx_display_close = _lib.vc_dispmanx_display_close +vc_dispmanx_display_close.argtypes = [DISPMANX_DISPLAY_HANDLE_T] +vc_dispmanx_display_close.restype = ct.c_int + +vc_dispmanx_update_start = _lib.vc_dispmanx_update_start +vc_dispmanx_update_start.argtypes = [ct.c_int32] +vc_dispmanx_update_start.restype = DISPMANX_UPDATE_HANDLE_T + +vc_dispmanx_element_add = _lib.vc_dispmanx_element_add +vc_dispmanx_element_add.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_DISPLAY_HANDLE_T, ct.c_int32, ct.POINTER(VC_RECT_T), DISPMANX_RESOURCE_HANDLE_T, ct.POINTER(VC_RECT_T), DISPMANX_PROTECTION_T, VC_DISPMANX_ALPHA_T, DISPMANX_CLAMP_T, DISPMANX_TRANSFORM_T] +vc_dispmanx_element_add.restype = DISPMANX_ELEMENT_HANDLE_T + +vc_dispmanx_element_change_source = _lib.vc_dispmanx_element_change_source +vc_dispmanx_element_change_source.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, DISPMANX_RESOURCE_HANDLE_T] +vc_dispmanx_element_change_source.restype = ct.c_int + +vc_dispmanx_element_change_layer = _lib.vc_dispmanx_element_change_layer +vc_dispmanx_element_change_layer.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, ct.c_int32] +vc_dispmanx_element_change_layer.restype = ct.c_int + +vc_dispmanx_element_modified = _lib.vc_dispmanx_element_modified +vc_dispmanx_element_modified.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, ct.POINTER(VC_RECT_T)] +vc_dispmanx_element_modified.restype = ct.c_int + +vc_dispmanx_element_remove = _lib.vc_dispmanx_element_remove +vc_dispmanx_element_remove.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T] +vc_dispmanx_element_remove.restype = ct.c_int + +vc_dispmanx_update_submit = _lib.vc_dispmanx_update_submit +vc_dispmanx_update_submit.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_CALLBACK_FUNC_T, ct.c_void_p] +vc_dispmanx_update_submit.restype = ct.c_int + +vc_dispmanx_update_submit_sync = _lib.vc_dispmanx_update_submit_sync +vc_dispmanx_update_submit_sync.argtypes = [DISPMANX_UPDATE_HANDLE_T] +vc_dispmanx_update_submit_sync.restype = ct.c_int + +vc_dispmanx_query_image_formats = _lib.vc_dispmanx_query_image_formats +vc_dispmanx_query_image_formats.argtypes = [ct.POINTER(ct.c_uint32)] +vc_dispmanx_query_image_formats.restype = ct.c_int + 
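+# NOTE (vendoring editor): the commented sketch below is not part of the
+# upstream picamera sources; it only illustrates how the dispmanx prototypes
+# declared above could be exercised, assuming this module is importable as
+# picamera.bcm_host on a Pi with libbcm_host.so present:
+#
+#     import ctypes as ct
+#     from picamera import bcm_host
+#
+#     bcm_host.bcm_host_init()
+#     w, h = ct.c_uint32(), ct.c_uint32()
+#     if bcm_host.graphics_get_display_size(0, ct.byref(w), ct.byref(h)) >= 0:
+#         print('Display 0 reports %dx%d' % (w.value, h.value))
+#     display = bcm_host.vc_dispmanx_display_open(0)
+#     info = bcm_host.DISPMANX_MODEINFO_T()
+#     if bcm_host.vc_dispmanx_display_get_info(display, ct.byref(info)) == 0:
+#         print('Mode info: %dx%d' % (info.width, info.height))
+#     bcm_host.vc_dispmanx_display_close(display)
+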
+vc_dispmanx_element_change_attributes = _lib.vc_dispmanx_element_change_attributes +vc_dispmanx_element_change_attributes.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, ct.c_uint32, ct.c_int32, ct.c_uint8, ct.POINTER(VC_RECT_T), ct.POINTER(VC_RECT_T), DISPMANX_RESOURCE_HANDLE_T, DISPMANX_TRANSFORM_T] +vc_dispmanx_element_change_attributes.restype = ct.c_int + +vc_vchi_dispmanx_init = _lib.vc_vchi_dispmanx_init +vc_vchi_dispmanx_init.argtypes = [VCHI_INSTANCE_T, ct.POINTER(VCHI_CONNECTION_T), ct.c_uint32] +vc_vchi_dispmanx_init.restype = None + +vc_dispmanx_snapshot = _lib.vc_dispmanx_snapshot +vc_dispmanx_snapshot.argtypes = [DISPMANX_DISPLAY_HANDLE_T, DISPMANX_RESOURCE_HANDLE_T, DISPMANX_TRANSFORM_T] +vc_dispmanx_snapshot.restype = ct.c_int + +vc_dispmanx_resource_set_palette = _lib.vc_dispmanx_resource_set_palette +vc_dispmanx_resource_set_palette.argtypes = [DISPMANX_RESOURCE_HANDLE_T, ct.c_void_p, ct.c_int, ct.c_int] +vc_dispmanx_resource_set_palette.restype = ct.c_int + +vc_dispmanx_vsync_callback = _lib.vc_dispmanx_vsync_callback +vc_dispmanx_vsync_callback.argtypes = [DISPMANX_DISPLAY_HANDLE_T, DISPMANX_CALLBACK_FUNC_T, ct.c_void_p] +vc_dispmanx_vsync_callback.restype = ct.c_int + +# vc_cec.h ################################################################### + +CEC_BROADCAST_ADDR = 0xF +CEC_TV_ADDRESS = 0 +CEC_MAX_XMIT_LENGTH = 15 +CEC_CLEAR_ADDR = 0xFFFF + +CEC_VERSION = 0x4 +CEC_VENDOR_ID_BROADCOM = 0x18C086 +CEC_VENDOR_ID_ONKYO = 0x0009B0 +CEC_VENDOR_ID_PANASONIC_EUROPE = 0x000F12 +CEC_VENDOR_ID = 0 + +CEC_BLOCKING = 1 +CEC_NONBLOCKING = 0 + +CEC_AllDevices_T = ct.c_uint32 # enum +( + CEC_AllDevices_eTV, + CEC_AllDevices_eRec1, + CEC_AllDevices_eRec2, + CEC_AllDevices_eSTB1, + CEC_AllDevices_eDVD1, + CEC_AllDevices_eAudioSystem, + CEC_AllDevices_eSTB2, + CEC_AllDevices_eSTB3, + CEC_AllDevices_eDVD2, + CEC_AllDevices_eRec3, + CEC_AllDevices_eSTB4, + CEC_AllDevices_eDVD3, + CEC_AllDevices_eRsvd3, + CEC_AllDevices_eRsvd4, + CEC_AllDevices_eFreeUse, + CEC_AllDevices_eUnRegistered, +) = range(16) + +CEC_DEVICE_TYPE_T = ct.c_uint32 # enum +( + CEC_DeviceType_TV, + CEC_DeviceType_Rec, + CEC_DeviceType_Reserved, + CEC_DeviceType_Tuner, + CEC_DeviceType_Playback, + CEC_DeviceType_Audio, + CEC_DeviceType_Switch, + CEC_DeviceType_VidProc, +) = range(8) +CEC_DeviceType_Invalid = 0xF + +CEC_OPCODE_T = ct.c_uint32 # enum +CEC_Opcode_FeatureAbort = 0x00 +CEC_Opcode_ImageViewOn = 0x04 +CEC_Opcode_TunerStepIncrement = 0x05 +CEC_Opcode_TunerStepDecrement = 0x06 +CEC_Opcode_TunerDeviceStatus = 0x07 +CEC_Opcode_GiveTunerDeviceStatus = 0x08 +CEC_Opcode_RecordOn = 0x09 +CEC_Opcode_RecordStatus = 0x0A +CEC_Opcode_RecordOff = 0x0B +CEC_Opcode_TextViewOn = 0x0D +CEC_Opcode_RecordTVScreen = 0x0F +CEC_Opcode_GiveDeckStatus = 0x1A +CEC_Opcode_DeckStatus = 0x1B +CEC_Opcode_SetMenuLanguage = 0x32 +CEC_Opcode_ClearAnalogTimer = 0x33 +CEC_Opcode_SetAnalogTimer = 0x34 +CEC_Opcode_TimerStatus = 0x35 +CEC_Opcode_Standby = 0x36 +CEC_Opcode_Play = 0x41 +CEC_Opcode_DeckControl = 0x42 +CEC_Opcode_TimerClearedStatus = 0x43 +CEC_Opcode_UserControlPressed = 0x44 +CEC_Opcode_UserControlReleased = 0x45 +CEC_Opcode_GiveOSDName = 0x46 +CEC_Opcode_SetOSDName = 0x47 +CEC_Opcode_SetOSDString = 0x64 +CEC_Opcode_SetTimerProgramTitle = 0x67 +CEC_Opcode_SystemAudioModeRequest = 0x70 +CEC_Opcode_GiveAudioStatus = 0x71 +CEC_Opcode_SetSystemAudioMode = 0x72 +CEC_Opcode_ReportAudioStatus = 0x7A +CEC_Opcode_GiveSystemAudioModeStatus = 0x7D +CEC_Opcode_SystemAudioModeStatus = 0x7E +CEC_Opcode_RoutingChange = 0x80 
+CEC_Opcode_RoutingInformation = 0x81 +CEC_Opcode_ActiveSource = 0x82 +CEC_Opcode_GivePhysicalAddress = 0x83 +CEC_Opcode_ReportPhysicalAddress = 0x84 +CEC_Opcode_RequestActiveSource = 0x85 +CEC_Opcode_SetStreamPath = 0x86 +CEC_Opcode_DeviceVendorID = 0x87 +CEC_Opcode_VendorCommand = 0x89 +CEC_Opcode_VendorRemoteButtonDown = 0x8A +CEC_Opcode_VendorRemoteButtonUp = 0x8B +CEC_Opcode_GiveDeviceVendorID = 0x8C +CEC_Opcode_MenuRequest = 0x8D +CEC_Opcode_MenuStatus = 0x8E +CEC_Opcode_GiveDevicePowerStatus = 0x8F +CEC_Opcode_ReportPowerStatus = 0x90 +CEC_Opcode_GetMenuLanguage = 0x91 +CEC_Opcode_SelectAnalogService = 0x92 +CEC_Opcode_SelectDigitalService = 0x93 +CEC_Opcode_SetDigitalTimer = 0x97 +CEC_Opcode_ClearDigitalTimer = 0x99 +CEC_Opcode_SetAudioRate = 0x9A +CEC_Opcode_InactiveSource = 0x9D +CEC_Opcode_CECVersion = 0x9E +CEC_Opcode_GetCECVersion = 0x9F +CEC_Opcode_VendorCommandWithID = 0xA0 +CEC_Opcode_ClearExternalTimer = 0xA1 +CEC_Opcode_SetExternalTimer = 0xA2 +CEC_Opcode_ReportShortAudioDescriptor = 0xA3 +CEC_Opcode_RequestShortAudioDescriptor = 0xA4 +CEC_Opcode_InitARC = 0xC0 +CEC_Opcode_ReportARCInited = 0xC1 +CEC_Opcode_ReportARCTerminated = 0xC2 +CEC_Opcode_RequestARCInit = 0xC3 +CEC_Opcode_RequestARCTermination = 0xC4 +CEC_Opcode_TerminateARC = 0xC5 +CEC_Opcode_CDC = 0xF8 +CEC_Opcode_Abort = 0xFF + +CEC_ABORT_REASON_T = ct.c_uint32 # enum +( + CEC_Abort_Reason_Unrecognised_Opcode, + CEC_Abort_Reason_Wrong_Mode, + CEC_Abort_Reason_Cannot_Provide_Source, + CEC_Abort_Reason_Invalid_Operand, + CEC_Abort_Reason_Refused, + CEC_Abort_Reason_Undetermined, +) = range(6) + +CEC_DISPLAY_CONTROL_T = ct.c_uint32 # enum +CEC_DISPLAY_CONTROL_DEFAULT_TIME = 0 +CEC_DISPLAY_CONTROL_UNTIL_CLEARED = 1 << 6 +CEC_DISPLAY_CONTROL_CLEAR_PREV_MSG = 1 << 7 + +CEC_POWER_STATUS_T = ct.c_uint32 # enum +( + CEC_POWER_STATUS_ON, + CEC_POWER_STATUS_STANDBY, + CEC_POWER_STATUS_ON_PENDING, + CEC_POWER_STATUS_STANDBY_PENDING, +) = range(4) + +CEC_MENU_STATE_T = ct.c_uint32 # enum +( + CEC_MENU_STATE_ACTIVATED, + CEC_MENU_STATE_DEACTIVATED, + CEC_MENU_STATE_QUERY, +) = range(3) + +CEC_DECK_INFO_T = ct.c_uint32 # enum +( + CEC_DECK_INFO_PLAY, + CEC_DECK_INFO_RECORD, + CEC_DECK_INFO_PLAY_REVERSE, + CEC_DECK_INFO_STILL, + CEC_DECK_INFO_SLOW, + CEC_DECK_INFO_SLOW_REVERSE, + CEC_DECK_INFO_SEARCH_FORWARD, + CEC_DECK_INFO_SEARCH_REVERSE, + CEC_DECK_INFO_NO_MEDIA, + CEC_DECK_INFO_STOP, + CEC_DECK_INFO_WIND, + CEC_DECK_INFO_REWIND, + CEC_DECK_IDX_SEARCH_FORWARD, + CEC_DECK_IDX_SEARCH_REVERSE, + CEC_DECK_OTHER_STATUS, +) = range(0x11, 0x20) + +CEC_DECK_CTRL_MODE_T = ct.c_uint32 # enum +( + CEC_DECK_CTRL_FORWARD, + CEC_DECK_CTRL_BACKWARD, + CEC_DECK_CTRL_STOP, + CEC_DECK_CTRL_EJECT, +) = range(1, 5) + +CEC_PLAY_MODE_T = ct.c_uint32 # enum +CEC_PLAY_FORWARD = 0x24 +CEC_PLAY_REVERSE = 0x20 +CEC_PLAY_STILL = 0x25 +CEC_PLAY_SCAN_FORWARD_MIN_SPEED = 0x05 +CEC_PLAY_SCAN_FORWARD_MED_SPEED = 0x06 +CEC_PLAY_SCAN_FORWARD_MAX_SPEED = 0x07 +CEC_PLAY_SCAN_REVERSE_MIN_SPEED = 0x09 +CEC_PLAY_SCAN_REVERSE_MED_SPEED = 0x0A +CEC_PLAY_SCAN_REVERSE_MAX_SPEED = 0x0B +CEC_PLAY_SLOW_FORWARD_MIN_SPEED = 0x15 +CEC_PLAY_SLOW_FORWARD_MED_SPEED = 0x16 +CEC_PLAY_SLOW_FORWARD_MAX_SPEED = 0x17 +CEC_PLAY_SLOW_REVERSE_MIN_SPEED = 0x19 +CEC_PLAY_SLOW_REVERSE_MED_SPEED = 0x1A +CEC_PLAY_SLOW_REVERSE_MAX_SPEED = 0x1B + +CEC_DECK_STATUS_REQUEST_T = ct.c_uint32 # enum +( + CEC_DECK_STATUS_ON, + CEC_DECK_STATUS_OFF, + CEC_DECK_STATUS_ONCE, +) = range(1, 4) + +CEC_USER_CONTROL_T = ct.c_uint32 # enum +CEC_User_Control_Select = 0x00 +CEC_User_Control_Up = 0x01 
+CEC_User_Control_Down = 0x02 +CEC_User_Control_Left = 0x03 +CEC_User_Control_Right = 0x04 +CEC_User_Control_RightUp = 0x05 +CEC_User_Control_RightDown = 0x06 +CEC_User_Control_LeftUp = 0x07 +CEC_User_Control_LeftDown = 0x08 +CEC_User_Control_RootMenu = 0x09 +CEC_User_Control_SetupMenu = 0x0A +CEC_User_Control_ContentsMenu = 0x0B +CEC_User_Control_FavoriteMenu = 0x0C +CEC_User_Control_Exit = 0x0D +CEC_User_Control_Number0 = 0x20 +CEC_User_Control_Number1 = 0x21 +CEC_User_Control_Number2 = 0x22 +CEC_User_Control_Number3 = 0x23 +CEC_User_Control_Number4 = 0x24 +CEC_User_Control_Number5 = 0x25 +CEC_User_Control_Number6 = 0x26 +CEC_User_Control_Number7 = 0x27 +CEC_User_Control_Number8 = 0x28 +CEC_User_Control_Number9 = 0x29 +CEC_User_Control_Dot = 0x2A +CEC_User_Control_Enter = 0x2B +CEC_User_Control_Clear = 0x2C +CEC_User_Control_ChannelUp = 0x30 +CEC_User_Control_ChannelDown = 0x31 +CEC_User_Control_PreviousChannel = 0x32 +CEC_User_Control_SoundSelect = 0x33 +CEC_User_Control_InputSelect = 0x34 +CEC_User_Control_DisplayInformation = 0x35 +CEC_User_Control_Help = 0x36 +CEC_User_Control_PageUp = 0x37 +CEC_User_Control_PageDown = 0x38 +CEC_User_Control_Power = 0x40 +CEC_User_Control_VolumeUp = 0x41 +CEC_User_Control_VolumeDown = 0x42 +CEC_User_Control_Mute = 0x43 +CEC_User_Control_Play = 0x44 +CEC_User_Control_Stop = 0x45 +CEC_User_Control_Pause = 0x46 +CEC_User_Control_Record = 0x47 +CEC_User_Control_Rewind = 0x48 +CEC_User_Control_FastForward = 0x49 +CEC_User_Control_Eject = 0x4A +CEC_User_Control_Forward = 0x4B +CEC_User_Control_Backward = 0x4C +CEC_User_Control_Angle = 0x50 +CEC_User_Control_Subpicture = 0x51 +CEC_User_Control_VideoOnDemand = 0x52 +CEC_User_Control_EPG = 0x53 +CEC_User_Control_TimerProgramming = 0x54 +CEC_User_Control_InitialConfig = 0x55 +CEC_User_Control_PlayFunction = 0x60 +CEC_User_Control_PausePlayFunction = 0x61 +CEC_User_Control_RecordFunction = 0x62 +CEC_User_Control_PauseRecordFunction = 0x63 +CEC_User_Control_StopFunction = 0x64 +CEC_User_Control_MuteFunction = 0x65 +CEC_User_Control_RestoreVolumeFunction = 0x66 +CEC_User_Control_TuneFunction = 0x67 +CEC_User_Control_SelectDiskFunction = 0x68 +CEC_User_Control_SelectAVInputFunction = 0x69 +CEC_User_Control_SelectAudioInputFunction = 0x6A +CEC_User_Control_F1Blue = 0x71 +CEC_User_Control_F2Red = 0x72 +CEC_User_Control_F3Green = 0x73 +CEC_User_Control_F4Yellow = 0x74 +CEC_User_Control_F5 = 0x75 + +class VC_CEC_TOPOLOGY_T(ct.Structure): + _fields_ = [ + ('active_mask', ct.c_uint16), + ('num_devices', ct.c_uint16), + ('device_attr', ct.c_uint32 * 16), + ] + +class VC_CEC_MESSAGE_T(ct.Structure): + _fields_ = [ + ('length', ct.c_uint32), + ('initiator', CEC_AllDevices_T), + ('follower', CEC_AllDevices_T), + ('payload', ct.c_uint8 * (CEC_MAX_XMIT_LENGTH + 1)), + ] + +VC_CEC_NOTIFY_T = ct.c_uint32 # enum +VC_CEC_NOTIFY_NONE = 0 +VC_CEC_TX = 1 << 0 +VC_CEC_RX = 1 << 1 +VC_CEC_BUTTON_PRESSED = 1 << 2 +VC_CEC_BUTTON_RELEASE = 1 << 3 +VC_CEC_REMOTE_PRESSED = 1 << 4 +VC_CEC_REMOTE_RELEASE = 1 << 5 +VC_CEC_LOGICAL_ADDR = 1 << 6 +VC_CEC_TOPOLOGY = 1 << 7 +VC_CEC_LOGICAL_ADDR_LOST = 1 << 15 + +CEC_CALLBACK_T = ct.CFUNCTYPE( + None, + ct.c_void_p, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32) + +CEC_CB_REASON = lambda x: x & 0xFFFF +CEC_CB_MSG_LENGTH = lambda x: (x >> 16) & 0xFF +CEC_CB_RC = lambda x: (x >> 24) & 0xFF + +CEC_CB_INITIATOR = lambda x: (x >> 4) & 0xF +CEC_CB_FOLLOWER = lambda x: x & 0xF +CEC_CB_OPCODE = lambda x: (x >> 8) & 0xFF +CEC_CB_OPERAND1 = lambda x: (x >> 16) & 0xFF +CEC_CB_OPERAND2 = lambda x: (x >> 
24) & 0xFF + +VC_CEC_ERROR_T = ct.c_uint32 # enum +( + VC_CEC_SUCCESS, + VC_CEC_ERROR_NO_ACK, + VC_CEC_ERROR_SHUTDOWN, + VC_CEC_ERROR_BUSY, + VC_CEC_ERROR_NO_LA, + VC_CEC_ERROR_NO_PA, + VC_CEC_ERROR_NO_TOPO, + VC_CEC_ERROR_INVALID_FOLLOWER, + VC_CEC_ERROR_INVALID_ARGUMENT, +) = range(9) + +# vc_cecservice.h ############################################################ + +CECSERVICE_CALLBACK_T = ct.CFUNCTYPE( + None, + ct.c_void_p, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32) + +vc_vchi_cec_init = _lib.vc_vchi_cec_init +vc_vchi_cec_init.argtypes = [VCHI_INSTANCE_T, ct.POINTER(ct.POINTER(VCHI_CONNECTION_T)), ct.c_uint32] +vc_vchi_cec_init.restype = None + +vc_vchi_cec_stop = _lib.vc_vchi_cec_stop +vc_vchi_cec_stop.argtypes = [] +vc_vchi_cec_stop.restype = None + +vc_cec_register_callback = _lib.vc_cec_register_callback +vc_cec_register_callback.argtypes = [CECSERVICE_CALLBACK_T, ct.c_void_p] +vc_cec_register_callback.restype = None + +vc_cec_register_command = _lib.vc_cec_register_command +vc_cec_register_command.argtypes = [CEC_OPCODE_T] +vc_cec_register_command.restype = ct.c_int + +vc_cec_register_all = _lib.vc_cec_register_all +vc_cec_register_all.argtypes = [] +vc_cec_register_all.restype = ct.c_int + +vc_cec_deregister_command = _lib.vc_cec_deregister_command +vc_cec_deregister_command.argtypes = [CEC_OPCODE_T] +vc_cec_deregister_command.restype = ct.c_int + +vc_cec_deregister_all = _lib.vc_cec_deregister_all +vc_cec_deregister_all.argtypes = [] +vc_cec_deregister_all.restype = ct.c_int + +vc_cec_send_message = _lib.vc_cec_send_message +vc_cec_send_message.argtypes = [ct.c_uint32, ct.POINTER(ct.c_uint8), ct.c_uint32, vcos_bool_t] +vc_cec_send_message.restype = ct.c_int + +vc_cec_get_logical_address = _lib.vc_cec_get_logical_address +vc_cec_get_logical_address.argtypes = [ct.POINTER(CEC_AllDevices_T)] +vc_cec_get_logical_address.restype = ct.c_int + +vc_cec_alloc_logical_address = _lib.vc_cec_alloc_logical_address +vc_cec_alloc_logical_address.argtypes = [] +vc_cec_alloc_logical_address.restype = ct.c_int + +vc_cec_release_logical_address = _lib.vc_cec_release_logical_address +vc_cec_release_logical_address.argtypes = [] +vc_cec_release_logical_address.restype = ct.c_int + +vc_cec_get_topology = _lib.vc_cec_get_topology +vc_cec_get_topology.argtypes = [ct.POINTER(VC_CEC_TOPOLOGY_T)] +vc_cec_get_topology.restype = ct.c_int + +vc_cec_set_vendor_id = _lib.vc_cec_set_vendor_id +vc_cec_set_vendor_id.argtypes = [ct.c_uint32] +vc_cec_set_vendor_id.restype = ct.c_int + +vc_cec_set_osd_name = _lib.vc_cec_set_osd_name +vc_cec_set_osd_name.argtypes = [ct.c_char_p] +vc_cec_set_osd_name.restype = ct.c_int + +vc_cec_get_physical_address = _lib.vc_cec_get_physical_address +vc_cec_get_physical_address.argtypes = [ct.POINTER(ct.c_uint16)] +vc_cec_get_physical_address.restype = ct.c_int + +vc_cec_get_vendor_id = _lib.vc_cec_get_vendor_id +vc_cec_get_vendor_id.argtypes = [CEC_AllDevices_T, ct.POINTER(ct.c_uint32)] +vc_cec_get_vendor_id.restype = ct.c_int + +vc_cec_device_type = _lib.vc_cec_device_type +vc_cec_device_type.argtypes = [CEC_AllDevices_T] +vc_cec_device_type.restype = CEC_DEVICE_TYPE_T + +vc_cec_send_message2 = _lib.vc_cec_send_message2 +vc_cec_send_message2.argtypes = [ct.POINTER(VC_CEC_MESSAGE_T)] +vc_cec_send_message2.restype = ct.c_int + +vc_cec_param2message = _lib.vc_cec_param2message +vc_cec_param2message.argtypes = [ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.POINTER(VC_CEC_MESSAGE_T)] +vc_cec_param2message.restype = ct.c_int + 
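+
+# A minimal usage sketch of the CEC service bindings above (not part of the
+# upstream headers or of picamera itself): it assumes the service has been
+# brought up with vc_vchi_cec_init() and that the vc_cec_* calls return
+# VC_CEC_SUCCESS (0) on success. The helper name below is illustrative only.
+def _cec_address_example():
+    addr = CEC_AllDevices_T()
+    if vc_cec_get_logical_address(ct.byref(addr)) != VC_CEC_SUCCESS:
+        raise RuntimeError('failed to query CEC logical address')
+    paddr = ct.c_uint16()
+    if vc_cec_get_physical_address(ct.byref(paddr)) != VC_CEC_SUCCESS:
+        raise RuntimeError('failed to query CEC physical address')
+    # the physical address packs the HDMI routing path a.b.c.d into a single
+    # 16-bit value, four bits per component
+    return addr.value, paddr.value
+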
+vc_cec_poll_address = _lib.vc_cec_poll_address +vc_cec_poll_address.argtypes = [CEC_AllDevices_T] +vc_cec_poll_address.restype = ct.c_int + +vc_cec_set_logical_address = _lib.vc_cec_set_logical_address +vc_cec_set_logical_address.argtypes = [CEC_AllDevices_T, CEC_DEVICE_TYPE_T, ct.c_uint32] +vc_cec_set_logical_address.restype = ct.c_int + +vc_cec_add_device = _lib.vc_cec_add_device +vc_cec_add_device.argtypes = [CEC_AllDevices_T, ct.c_uint16, CEC_DEVICE_TYPE_T, vcos_bool_t] +vc_cec_add_device.restype = ct.c_int + +vc_cec_set_passive = _lib.vc_cec_set_passive +vc_cec_set_passive.argtypes = [vcos_bool_t] +vc_cec_set_passive.restype = ct.c_int + +vc_cec_send_FeatureAbort = _lib.vc_cec_send_FeatureAbort +vc_cec_send_FeatureAbort.argtypes = [ct.c_uint32, CEC_OPCODE_T, CEC_ABORT_REASON_T] +vc_cec_send_FeatureAbort.restype = ct.c_int + +vc_cec_send_ActiveSource = _lib.vc_cec_send_ActiveSource +vc_cec_send_ActiveSource.argtypes = [ct.c_uint16, vcos_bool_t] +vc_cec_send_ActiveSource.restype = ct.c_int + +vc_cec_send_ImageViewOn = _lib.vc_cec_send_ImageViewOn +vc_cec_send_ImageViewOn.argtypes = [ct.c_uint32, vcos_bool_t] +vc_cec_send_ImageViewOn.restype = ct.c_int + +vc_cec_send_SetOSDString = _lib.vc_cec_send_SetOSDString +vc_cec_send_SetOSDString.argtypes = [ct.c_uint32, CEC_DISPLAY_CONTROL_T, ct.c_char_p, vcos_bool_t] +vc_cec_send_SetOSDString.restype = ct.c_int + +vc_cec_send_Standby = _lib.vc_cec_send_Standby +vc_cec_send_Standby.argtypes = [ct.c_uint32, vcos_bool_t] +vc_cec_send_Standby.restype = ct.c_int + +vc_cec_send_MenuStatus = _lib.vc_cec_send_MenuStatus +vc_cec_send_MenuStatus.argtypes = [ct.c_uint32, CEC_MENU_STATE_T, vcos_bool_t] +vc_cec_send_MenuStatus.restype = ct.c_int + +vc_cec_send_ReportPhysicalAddress = _lib.vc_cec_send_ReportPhysicalAddress +vc_cec_send_ReportPhysicalAddress.argtypes = [ct.c_uint16, CEC_DEVICE_TYPE_T, vcos_bool_t] +vc_cec_send_ReportPhysicalAddress.restype = ct.c_int + +# vc_gencmd.h ################################################################ + +vc_gencmd_init = _lib.vc_gencmd_init +vc_gencmd_init.argtypes = [] +vc_gencmd_init.restype = ct.c_int + +vc_gencmd_stop = _lib.vc_gencmd_stop +vc_gencmd_stop.argtypes = [] +vc_gencmd_stop.restype = None + +vc_gencmd_send = _lib.vc_gencmd_send +vc_gencmd_send.argtypes = [ct.c_char_p] +vc_gencmd_send.restype = ct.c_int + +vc_gencmd_read_response = _lib.vc_gencmd_read_response +vc_gencmd_read_response.argtypes = [ct.c_char_p, ct.c_int] +vc_gencmd_read_response.restype = ct.c_int + +vc_gencmd = _lib.vc_gencmd +vc_gencmd.argtypes = [ct.c_char_p, ct.c_int, ct.c_char_p] +vc_gencmd.restype = ct.c_int + +vc_gencmd_string_property = _lib.vc_gencmd_string_property +vc_gencmd_string_property.argtypes = [ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_char_p), ct.POINTER(ct.c_int)] +vc_gencmd_string_property.restype = ct.c_int + +vc_gencmd_number_property = _lib.vc_gencmd_number_property +vc_gencmd_number_property.argtypes = [ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_int)] +vc_gencmd_number_property.restype = ct.c_int + +vc_gencmd_until = _lib.vc_gencmd_until +vc_gencmd_until.argtypes = [ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_int] +vc_gencmd_until.restype = ct.c_int + diff --git a/picamera/camera.py b/picamera/camera.py new file mode 100644 index 0000000..a7da186 --- /dev/null +++ b/picamera/camera.py @@ -0,0 +1,4165 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary 
forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + +import warnings +import datetime +import mimetypes +import ctypes as ct +import threading +from fractions import Fraction +from operator import itemgetter, and_ +from functools import reduce +from collections import namedtuple + +from . import bcm_host, mmal, mmalobj as mo +from .exc import ( + PiCameraError, + PiCameraValueError, + PiCameraRuntimeError, + PiCameraClosed, + PiCameraNotRecording, + PiCameraAlreadyRecording, + PiCameraMMALError, + PiCameraDeprecated, + PiCameraFallback, + ) +from .encoders import ( + PiVideoFrame, + PiVideoEncoder, + PiRawVideoEncoder, + PiCookedVideoEncoder, + PiRawOneImageEncoder, + PiRawMultiImageEncoder, + PiCookedOneImageEncoder, + PiCookedMultiImageEncoder, + ) +from .renderers import ( + PiPreviewRenderer, + PiOverlayRenderer, + PiNullSink, + ) +from .color import Color + +try: + from RPi import GPIO +except ImportError: + # Can't find RPi.GPIO so just null-out the reference + GPIO = None + + +def docstring_values(values, indent=8): + """ + Formats a dictionary of values for inclusion in a docstring. + """ + return ('\n' + ' ' * indent).join( + "* ``'%s'``" % k + for (k, v) in + sorted(values.items(), key=itemgetter(1))) + + +class PiCameraMaxResolution(object): + """ + Singleton representing the maximum resolution of the camera module. + """ +PiCameraMaxResolution = PiCameraMaxResolution() + + +class PiCameraMaxFramerate(object): + """ + Singleton representing the maximum framerate of the camera module. + """ +PiCameraMaxFramerate = PiCameraMaxFramerate() + + +PiCameraConfig = namedtuple('PiCameraConfig', ( + 'sensor_mode', + 'clock_mode', + 'resolution', + 'framerate', + 'isp_blocks', + 'colorspace', +)) + + +class PiCamera(object): + """ + Provides a pure Python interface to the Raspberry Pi's camera module. + + Upon construction, this class initializes the camera. The *camera_num* + parameter (which defaults to 0) selects the camera module that the instance + will represent. 
Only the Raspberry Pi compute module currently supports
+    more than one camera.
+
+    The *sensor_mode*, *resolution*, *framerate*, *framerate_range*,
+    *clock_mode*, and *isp_blocks* parameters provide initial values for the
+    :attr:`sensor_mode`, :attr:`resolution`, :attr:`framerate`,
+    :attr:`framerate_range`, :attr:`clock_mode`, and :attr:`isp_blocks`
+    attributes of the class (these attributes are all relatively expensive to
+    set individually, hence setting them all upon construction is a speed
+    optimization). Please refer to the attribute documentation for more
+    information and default values.
+
+    The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
+    on a compute module for stereoscopic mode. These parameters can only be set
+    at construction time; they cannot be altered later without closing the
+    :class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
+    defaults to ``'none'`` (no stereoscopic mode) but can be set to
+    ``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
+    the *stereo_decimate* parameter is ``True``, the resolution of the two
+    cameras will be halved so that the resulting image has the same dimensions
+    as if stereoscopic mode were not being used.
+
+    The *led_pin* parameter can be used to specify the GPIO pin which should be
+    used to control the camera's LED via the :attr:`led` attribute. If this is
+    not specified, it should default to the correct value for your Pi platform.
+    You should only need to specify this parameter if you are using a custom
+    DeviceTree blob (this is only typical on the `Compute Module`_ platform).
+
+    No preview or recording is started automatically upon construction. Use
+    the :meth:`capture` method to capture images, the :meth:`start_recording`
+    method to begin recording video, or the :meth:`start_preview` method to
+    start live display of the camera's input.
+
+    Several attributes are provided to adjust the camera's configuration. Some
+    of these can be adjusted while a recording is running, like
+    :attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
+    when the camera is idle.
+
+    When you are finished with the camera, you should ensure you call the
+    :meth:`close` method to release the camera resources::
+
+        camera = PiCamera()
+        try:
+            # do something with the camera
+            pass
+        finally:
+            camera.close()
+
+    The class supports the context manager protocol to make this particularly
+    easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
+    is automatically called)::
+
+        with PiCamera() as camera:
+            # do something with the camera
+            pass
+
+    .. versionchanged:: 1.8
+        Added *stereo_mode* and *stereo_decimate* parameters.
+
+    .. versionchanged:: 1.9
+        Added *resolution*, *framerate*, and *sensor_mode* parameters.
+
+    .. versionchanged:: 1.10
+        Added *led_pin* parameter.
+
+    .. versionchanged:: 1.11
+        Added *clock_mode* parameter, and permitted setting of resolution as
+        appropriately formatted string.
+
+    .. versionchanged:: 1.13
+        Added *framerate_range* parameter.
+
+    .. versionchanged:: 1.14
+        Positional arguments are now deprecated; all arguments to the
+        constructor should be specified as keyword-args.
+
+    .. 
_Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md + """ + + CAMERA_PREVIEW_PORT = 0 + CAMERA_VIDEO_PORT = 1 + CAMERA_CAPTURE_PORT = 2 + MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__ + MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__ + DEFAULT_ANNOTATE_SIZE = 32 + CAPTURE_TIMEOUT = 60 + + SENSOR_MODES = { + 'ov5647': { + 1: mo.PiSensorMode('1080p', (1, 30), full_fov=False), + 2: mo.PiSensorMode('2592x1944', (1, 15), still=True), + 3: mo.PiSensorMode('2592x1944', (1/6, 1), still=True), + 4: mo.PiSensorMode('1296x972', (1, 42)), + 5: mo.PiSensorMode('1296x730', (1, 49)), + 6: mo.PiSensorMode('VGA', (42, 60)), + 7: mo.PiSensorMode('VGA', (60, 90)), + }, + 'imx219': { + 1: mo.PiSensorMode('1080p', (1/10, 30), full_fov=False), + 2: mo.PiSensorMode('3280x2464', (1/10, 15), still=True), + 3: mo.PiSensorMode('3280x2464', (1/10, 15), still=True), + 4: mo.PiSensorMode('1640x1232', (1/10, 40)), + 5: mo.PiSensorMode('1640x922', (1/10, 40)), + 6: mo.PiSensorMode('720p', (40, 90), full_fov=False), + 7: mo.PiSensorMode('VGA', (40, 90), full_fov=False), + }, + } + + METER_MODES = { + 'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE, + 'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT, + 'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT, + 'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX, + } + + EXPOSURE_MODES = { + 'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF, + 'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO, + 'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT, + 'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW, + 'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT, + 'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT, + 'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS, + 'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW, + 'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH, + 'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG, + 'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS, + 'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE, + 'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS, + } + + FLASH_MODES = { + 'off': mmal.MMAL_PARAM_FLASH_OFF, + 'auto': mmal.MMAL_PARAM_FLASH_AUTO, + 'on': mmal.MMAL_PARAM_FLASH_ON, + 'redeye': mmal.MMAL_PARAM_FLASH_REDEYE, + 'fillin': mmal.MMAL_PARAM_FLASH_FILLIN, + 'torch': mmal.MMAL_PARAM_FLASH_TORCH, + } + + AWB_MODES = { + 'off': mmal.MMAL_PARAM_AWBMODE_OFF, + 'auto': mmal.MMAL_PARAM_AWBMODE_AUTO, + 'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT, + 'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY, + 'shade': mmal.MMAL_PARAM_AWBMODE_SHADE, + 'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN, + 'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT, + 'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT, + 'flash': mmal.MMAL_PARAM_AWBMODE_FLASH, + 'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON, + 'greyworld': mmal.MMAL_PARAM_AWBMODE_GREYWORLD, + } + + IMAGE_EFFECTS = { + 'none': mmal.MMAL_PARAM_IMAGEFX_NONE, + 'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE, + 'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE, + # The following don't work + #'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE, + #'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD, + #'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD, + 'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH, + 'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE, + 'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS, + 'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT, + 'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH, + 'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN, + 'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL, + 'watercolor': 
mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
+        'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
+        'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
+        'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
+        'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
+        'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
+        'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
+        'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
+        'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
+        'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
+        'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
+        'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
+        }
+
+    DRC_STRENGTHS = {
+        'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
+        'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
+        'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
+        'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
+        }
+
+    RAW_FORMATS = {
+        'yuv',
+        'rgb',
+        'rgba',
+        'bgr',
+        'bgra',
+        }
+
+    STEREO_MODES = {
+        'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
+        'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
+        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
+        }
+
+    CLOCK_MODES = {
+        'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
+        'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
+        }
+
+    ISP_BLOCKS = {
+        'black-level': 1 << 2,
+        'lens-shading': 1 << 3,
+        'white-balance': 1 << 5,
+        'bad-pixel': 1 << 7,
+        'crosstalk': 1 << 9,
+        'demosaic': 1 << 11,
+        'gamma': 1 << 18,
+        'sharpening': 1 << 22,
+        }
+
+    COLORSPACES = {
+        'auto': mmal.MMAL_COLOR_SPACE_UNKNOWN,
+        'jfif': mmal.MMAL_COLOR_SPACE_JPEG_JFIF,
+        'bt601': mmal.MMAL_COLOR_SPACE_ITUR_BT601,
+        'bt709': mmal.MMAL_COLOR_SPACE_ITUR_BT709,
+        }
+
+    _METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
+    _EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
+    _FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
+    _AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
+    _IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
+    _DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
+    _STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
+    _CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
+    _ISP_BLOCKS_R = {v: k for (k, v) in ISP_BLOCKS.items()}
+    _COLORSPACES_R = {v: k for (k, v) in COLORSPACES.items()}
+
+    __slots__ = (
+        '_used_led',
+        '_led_pin',
+        '_camera',
+        '_camera_config',
+        '_camera_exception',
+        '_revision',
+        '_preview',
+        '_preview_alpha',
+        '_preview_layer',
+        '_preview_fullscreen',
+        '_preview_window',
+        '_splitter',
+        '_splitter_connection',
+        '_encoders_lock',
+        '_encoders',
+        '_overlays',
+        '_raw_format',
+        '_image_effect_params',
+        '_exif_tags',
+        )
+
+    def __init__(self, *args, **kwargs):
+        options = self._parse_options(args, kwargs)
+        self._camera = None
+        self._camera_config = None
+        self._camera_exception = None
+        self._preview = None
+        self._preview_alpha = 255
+        self._preview_layer = 2
+        self._preview_fullscreen = True
+        self._preview_window = None
+        self._splitter = None
+        self._splitter_connection = None
+        self._encoders_lock = threading.Lock()
+        self._encoders = {}
+        self._overlays = []
+        self._raw_format = 'yuv'
+        self._image_effect_params = None
+        self._exif_tags = {}
+        self._used_led = None
+        self._led_pin = None
+        bcm_host.bcm_host_init()
+        try:
+            self._init_revision(options)
+            old_config, new_config = self._init_config(options)
+            self._init_led(options)
+            self._init_camera(options)
+            self._configure_camera(old_config, new_config)
+            self._init_preview()
+            self._init_splitter()
+            self._camera.enable()
+            self._init_defaults()
+        except:
+            self.close()
+            raise
+        else:
+            
mimetypes.add_type('application/h264', '.h264', False) + mimetypes.add_type('application/mjpeg', '.mjpg', False) + mimetypes.add_type('application/mjpeg', '.mjpeg', False) + + @staticmethod + def _parse_options(args, kwargs): + """ + Parse the constructor options. + + In future versions we'll only support keyword args; for now (for + backwards compatibility) we'll allow the positional args that we + previously accepted but raise a deprecation warning for each. + """ + options = { # with defaults + 'camera_num': 0, + 'stereo_mode': 'none', + 'stereo_decimate': False, + 'resolution': None, + 'framerate': None, + 'sensor_mode': 0, + 'led_pin': None, + 'clock_mode': 'reset', + 'framerate_range': None, + 'isp_blocks': None, + 'colorspace': 'auto', + } + arg_names = ( + 'camera_num', + 'stereo_mode', + 'stereo_decimate', + 'resolution', + 'framerate', + 'sensor_mode', + 'led_pin', + 'clock_mode', + 'framerate_range', + ) + for arg_name, arg in zip(arg_names, args): + warnings.warn( + PiCameraDeprecated( + 'Specifying %s as a non-keyword argument is ' + 'deprecated' % arg_name)) + options[arg_name] = arg + for arg_name in options: + options[arg_name] = kwargs.pop(arg_name, options[arg_name]) + if kwargs: + raise TypeError( + 'PiCamera.__init__ got an unexpected keyword ' + 'argument %r' % kwargs.popitem()[0]) + return options + + def _init_revision(self, options): + """ + Query the firmware for the attached camera revision; older firmwares + can't return the revision but only support the OV5647 sensor so we can + assume that revision in such a case. This is also where the placeholder + objects for MAX_RESOLUTION and MAX_FRAMERATE are replaced with their + actual values + """ + with mo.MMALCameraInfo() as camera_info: + camera_num = options['camera_num'] + info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO] + revision = 'ov5647' + if camera_info.info_rev > 1: + revision = info.cameras[camera_num].camera_name.decode('ascii') + if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution: + PiCamera.MAX_RESOLUTION = mo.PiResolution( + info.cameras[camera_num].max_width, + info.cameras[camera_num].max_height, + ) + if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate: + if revision.lower() == 'ov5647': + PiCamera.MAX_FRAMERATE = 90 + else: + PiCamera.MAX_FRAMERATE = 120 + self._revision = revision + + @classmethod + def _init_config(cls, options): + """ + Construct initial and desired configurations to pass to the + :meth:`_configure_camera` method. The initial configuration is mostly + hard-coded defaults. The desired configuration comes from the specified + options. 
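+
+        For example, ``framerate`` and ``framerate_range`` are mutually
+        exclusive: specifying both raises :exc:`PiCameraValueError`, while a
+        ``framerate_range`` of ``(low, high)`` is converted to a pair of
+        values via ``mo.to_fraction`` (as the code below shows).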
+ """ + if options['resolution'] is None: + # Get screen resolution + w = ct.c_uint32() + h = ct.c_uint32() + if bcm_host.graphics_get_display_size(0, w, h) == -1: + w = 1280 + h = 720 + else: + w = int(w.value) + h = int(h.value) + resolution = mo.PiResolution(w, h) + elif options['resolution'] is PiCameraMaxResolution: + resolution = cls.MAX_RESOLUTION + else: + resolution = mo.to_resolution(options['resolution']) + + if options['framerate_range'] is None: + if options['framerate'] is None: + framerate = 30 + elif options['framerate'] is PiCameraMaxFramerate: + framerate = cls.MAX_FRAMERATE + else: + framerate = mo.to_fraction(options['framerate']) + elif options['framerate'] is not None: + raise PiCameraValueError( + "Can't specify framerate and framerate_range") + else: + try: + low, high = options['framerate_range'] + except TypeError: + raise PiCameraValueError( + "framerate_range must have (low, high) values") + if low is PiCameraMaxFramerate: + low = cls.MAX_FRAMERATE + if high is PiCameraMaxFramerate: + high = cls.MAX_FRAMERATE + framerate = (mo.to_fraction(low), mo.to_fraction(high)) + + try: + clock_mode = cls.CLOCK_MODES[options['clock_mode']] + except KeyError: + raise PiCameraValueError( + 'Invalid clock mode: %s' % options['clock_mode']) + + try: + colorspace = cls.COLORSPACES[options['colorspace']] + except KeyError: + raise PiCameraValueError( + 'Invalid colorspace: %s' % options['colorspace']) + + all_blocks = set(cls.ISP_BLOCKS.keys()) + if options['isp_blocks'] is None: + isp_blocks = 0 + else: + isp_blocks = set(options['isp_blocks']) + invalid = isp_blocks - all_blocks + if invalid: + raise PiCameraValueError( + 'Invalid ISP block: %s' % invalid.pop()) + isp_blocks = reduce(and_, (~v for k, v in cls.ISP_BLOCKS.items() + if k not in isp_blocks), 0xFFFFFFFF) + + old_config = PiCameraConfig( + sensor_mode=0, + clock_mode=clock_mode, + resolution=cls.MAX_RESOLUTION, + framerate=30, + isp_blocks=0, + colorspace=mmal.MMAL_COLOR_SPACE_UNKNOWN) + new_config = PiCameraConfig( + sensor_mode=options['sensor_mode'], + clock_mode=clock_mode, + resolution=resolution, + framerate=framerate, + isp_blocks=isp_blocks, + colorspace=colorspace) + return old_config, new_config + + def _init_led(self, options): + """ + Determine the GPIO pin to use for controlling the camera's LED, if any. + """ + led_pin = options['led_pin'] + if GPIO and led_pin is None: + try: + led_pin = { + (0, 0): 2, # compute module (default for cam 0) + (0, 1): 30, # compute module (default for cam 1) + (1, 0): 5, # Pi 1 model B rev 1 + (2, 0): 5, # Pi 1 model B rev 2 or model A + (3, 0): 32, # Pi 1 model B+ or Pi 2 model B + }[(GPIO.RPI_REVISION, options['camera_num'])] + except KeyError: + raise PiCameraError( + 'Unable to determine default GPIO LED pin for RPi ' + 'revision %d and camera num %d' % ( + GPIO.RPI_REVISION, options['camera_num'])) + self._used_led = False + self._led_pin = led_pin + + def _init_camera(self, options): + """ + Construct the MMAL camera component and perform all early configuration + on it (e.g. most stereoscopic configuration has to be done before the + camera is activated). + """ + try: + stereo_mode = self.STEREO_MODES[options['stereo_mode']] + except KeyError: + raise PiCameraValueError( + 'Invalid stereo mode: %s' % options['stereo_mode']) + try: + self._camera = mo.MMALCamera() + except PiCameraMMALError as e: + if e.status == mmal.MMAL_ENOMEM: + raise PiCameraError( + "Camera is not enabled. 
Try running 'sudo raspi-config' " + "and ensure that the camera has been enabled.") + else: + raise + self._camera_config = self._camera.control.params[ + mmal.MMAL_PARAMETER_CAMERA_CONFIG] + # Don't attempt to set this if stereo mode isn't requested as it'll + # break compatibility on older firmwares + if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE: + for p in self._camera.outputs: + mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE, + ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T), + ), + mode=stereo_mode, + decimate=options['stereo_decimate'], + swap_eyes=False, + ) + p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp + # Must be done *after* stereo-scopic setting + self._camera.control.params[ + mmal.MMAL_PARAMETER_CAMERA_NUM] = options['camera_num'] + + def _init_defaults(self): + """ + Sets most camera settings to various default values. + """ + self._exif_tags = { + 'IFD0.Model': 'RP_%s' % self.revision, + 'IFD0.Make': 'RaspberryPi', + } + self.sharpness = 0 + self.contrast = 0 + self.brightness = 50 + self.saturation = 0 + self.iso = 0 # auto + self.video_stabilization = False + self.exposure_compensation = 0 + self.exposure_mode = 'auto' + self.meter_mode = 'average' + self.awb_mode = 'auto' + self.image_effect = 'none' + self.color_effects = None + self.rotation = 0 + self.hflip = self.vflip = False + self.zoom = (0.0, 0.0, 1.0, 1.0) + + def _init_splitter(self): + """ + Create a splitter component for the video port. This is to permit video + recordings and captures where use_video_port=True to occur + simultaneously (#26) + """ + self._splitter = mo.MMALSplitter() + self._splitter.inputs[0].connect( + self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable() + + def _init_preview(self): + """ + Create a null-sink component, enable it and connect it to the camera's + preview port. If nothing is connected to the preview port, the camera + doesn't measure exposure and captured images gradually fade to black + (issue #22; subsequently fixed in firmware but there's no harm in + leaving this in place for the sake of backwards compat). + """ + self._preview = PiNullSink( + self, self._camera.outputs[self.CAMERA_PREVIEW_PORT]) + + def _start_capture(self, port): + """ + Starts the camera capturing frames. + + This method starts the camera feeding frames to any attached encoders, + but only enables capture if the port is the camera's still port, or if + there's a single active encoder on the video splitter. + """ + if ( + port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or + len([e for e in self._encoders.values() if e.active]) == 1): + port.params[mmal.MMAL_PARAMETER_CAPTURE] = True + + def _stop_capture(self, port): + """ + Stops the camera capturing frames. + + This method stops the camera feeding frames to any attached encoders, + but only disables capture if the port is the camera's still port, or if + there's a single active encoder on the video splitter. + """ + if ( + port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or + len([e for e in self._encoders.values() if e.active]) == 1): + port.params[mmal.MMAL_PARAMETER_CAPTURE] = False + + def _check_camera_open(self): + """ + Raise an exception if the camera is already closed, or if the camera + has encountered a fatal error. 
+ """ + exc, self._camera_exception = self._camera_exception, None + if exc: + raise exc + if self.closed: + raise PiCameraClosed("Camera is closed") + + def _check_recording_stopped(self): + """ + Raise an exception if the camera is currently recording. + """ + if self.recording: + raise PiCameraRuntimeError("Recording is currently running") + + def _get_ports(self, from_video_port, splitter_port): + """ + Determine the camera and output ports for given capture options. + + See :ref:`camera_hardware` for more information on picamera's usage of + camera, splitter, and encoder ports. The general idea here is that the + capture (still) port operates on its own, while the video port is + always connected to a splitter component, so requests for a video port + also have to specify which splitter port they want to use. + """ + self._check_camera_open() + if from_video_port and (splitter_port in self._encoders): + raise PiCameraAlreadyRecording( + 'The camera is already using port %d ' % splitter_port) + camera_port = ( + self._camera.outputs[self.CAMERA_VIDEO_PORT] + if from_video_port else + self._camera.outputs[self.CAMERA_CAPTURE_PORT] + ) + output_port = ( + self._splitter.outputs[splitter_port] + if from_video_port else + camera_port + ) + return (camera_port, output_port) + + def _get_output_format(self, output): + """ + Given an output object, attempt to determine the requested format. + + We attempt to determine the filename of the *output* object and derive + a MIME type from the extension. If *output* has no filename, an error + is raised. + """ + if isinstance(output, bytes): + filename = output.decode('utf-8') + elif isinstance(output, str): + filename = output + else: + try: + filename = output.name + except AttributeError: + raise PiCameraValueError( + 'Format must be specified when output has no filename') + type, encoding = mimetypes.guess_type(filename, strict=False) + if not type: + raise PiCameraValueError( + 'Unable to determine type from filename %s' % filename) + return type + + def _get_image_format(self, output, format=None): + """ + Given an output object and an optional format, attempt to determine the + requested image format. + + This method is used by all capture methods to determine the requested + output format. If *format* is specified as a MIME-type the "image/" + prefix is stripped. If *format* is not specified, then + :meth:`_get_output_format` will be called to attempt to determine + format from the *output* object. + """ + if isinstance(format, bytes): + format = format.decode('utf-8') + format = format or self._get_output_format(output) + format = ( + format[6:] if format.startswith('image/') else + format) + if format == 'x-ms-bmp': + format = 'bmp' + if format == 'raw': + format = self.raw_format + return format + + def _get_video_format(self, output, format=None): + """ + Given an output object and an optional format, attempt to determine the + requested video format. + + This method is used by all recording methods to determine the requested + output format. If *format* is specified as a MIME-type the "video/" or + "application/" prefix will be stripped. If *format* is not specified, + then :meth:`_get_output_format` will be called to attempt to determine + format from the *output* object. 
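+
+        For example, with the MIME types registered in ``__init__``,
+        ``'clip.h264'`` resolves to ``'h264'``::
+
+            mimetypes.guess_type('clip.h264', strict=False)
+            # -> ('application/h264', None); stripping the 'application/'
+            # prefix below leaves 'h264'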
+ """ + if isinstance(format, bytes): + format = format.decode('utf-8') + format = format or self._get_output_format(output) + format = ( + format[6:] if format.startswith('video/') else + format[12:] if format.startswith('application/') else + format) + return format + + def _get_image_encoder( + self, camera_port, output_port, format, resize, **options): + """ + Construct an image encoder for the requested parameters. + + This method is called by :meth:`capture` and :meth:`capture_continuous` + to construct an image encoder. The *camera_port* parameter gives the + MMAL camera port that should be enabled for capture by the encoder. The + *output_port* parameter gives the MMAL port that the encoder should + read output from (this may be the same as the camera port, but may be + different if other component(s) like a splitter have been placed in the + pipeline). The *format* parameter indicates the image format and will + be one of: + + * ``'jpeg'`` + * ``'png'`` + * ``'gif'`` + * ``'bmp'`` + * ``'yuv'`` + * ``'rgb'`` + * ``'rgba'`` + * ``'bgr'`` + * ``'bgra'`` + + The *resize* parameter indicates the size that the encoder should + resize the output to (presumably by including a resizer in the + pipeline). Finally, *options* includes extra keyword arguments that + should be passed verbatim to the encoder. + """ + encoder_class = ( + PiRawOneImageEncoder if format in self.RAW_FORMATS else + PiCookedOneImageEncoder) + return encoder_class( + self, camera_port, output_port, format, resize, **options) + + def _get_images_encoder( + self, camera_port, output_port, format, resize, **options): + """ + Construct a multi-image encoder for the requested parameters. + + This method is largely equivalent to :meth:`_get_image_encoder` with + the exception that the encoder returned should expect to be passed an + iterable of outputs to its :meth:`~PiEncoder.start` method, rather than + a single output object. This method is called by the + :meth:`capture_sequence` method. + + All parameters are the same as in :meth:`_get_image_encoder`. Please + refer to the documentation for that method for further information. + """ + encoder_class = ( + PiRawMultiImageEncoder if format in self.RAW_FORMATS else + PiCookedMultiImageEncoder) + return encoder_class( + self, camera_port, output_port, format, resize, **options) + + def _get_video_encoder( + self, camera_port, output_port, format, resize, **options): + """ + Construct a video encoder for the requested parameters. + + This method is called by :meth:`start_recording` and + :meth:`record_sequence` to construct a video encoder. The + *camera_port* parameter gives the MMAL camera port that should be + enabled for capture by the encoder. The *output_port* parameter gives + the MMAL port that the encoder should read output from (this may be the + same as the camera port, but may be different if other component(s) + like a splitter have been placed in the pipeline). The *format* + parameter indicates the video format and will be one of: + + * ``'h264'`` + * ``'mjpeg'`` + + The *resize* parameter indicates the size that the encoder should + resize the output to (presumably by including a resizer in the + pipeline). Finally, *options* includes extra keyword arguments that + should be passed verbatim to the encoder. + """ + encoder_class = ( + PiRawVideoEncoder if format in self.RAW_FORMATS else + PiCookedVideoEncoder) + return encoder_class( + self, camera_port, output_port, format, resize, **options) + + def close(self): + """ + Finalizes the state of the camera. 
+ + After successfully constructing a :class:`PiCamera` object, you should + ensure you call the :meth:`close` method once you are finished with the + camera (e.g. in the ``finally`` section of a ``try..finally`` block). + This method stops all recording and preview activities and releases all + resources associated with the camera; this is necessary to prevent GPU + memory leaks. + """ + for port in list(self._encoders): + self.stop_recording(splitter_port=port) + assert not self.recording + for overlay in list(self._overlays): + self.remove_overlay(overlay) + if self._preview: + self._preview.close() + self._preview = None + if self._splitter: + self._splitter.close() + self._splitter = None + if self._camera: + self._camera.close() + self._camera = None + exc, self._camera_exception = self._camera_exception, None + if exc: + raise exc + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + def start_preview(self, **options): + """ + Displays the preview overlay. + + This method starts a camera preview as an overlay on the Pi's primary + display (HDMI or composite). A :class:`PiRenderer` instance (more + specifically, a :class:`PiPreviewRenderer`) is constructed with the + keyword arguments captured in *options*, and is returned from the + method (this instance is also accessible from the :attr:`preview` + attribute for as long as the renderer remains active). By default, the + renderer will be opaque and fullscreen. + + This means the default preview overrides whatever is currently visible + on the display. More specifically, the preview does not rely on a + graphical environment like X-Windows (it can run quite happily from a + TTY console); it is simply an overlay on the Pi's video output. To stop + the preview and reveal the display again, call :meth:`stop_preview`. + The preview can be started and stopped multiple times during the + lifetime of the :class:`PiCamera` object. + + All other camera properties can be modified "live" while the preview is + running (e.g. :attr:`brightness`). + + .. note:: + + Because the default preview typically obscures the screen, ensure + you have a means of stopping a preview before starting one. If the + preview obscures your interactive console you won't be able to + Alt+Tab back to it as the preview isn't in a window. If you are in + an interactive Python session, simply pressing Ctrl+D usually + suffices to terminate the environment, including the camera and its + associated preview. + """ + self._check_camera_open() + self._preview.close() + options.setdefault('layer', self._preview_layer) + options.setdefault('alpha', self._preview_alpha) + options.setdefault('fullscreen', self._preview_fullscreen) + options.setdefault('window', self._preview_window) + renderer = PiPreviewRenderer( + self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options) + self._preview = renderer + return renderer + + def stop_preview(self): + """ + Hides the preview overlay. + + If :meth:`start_preview` has previously been called, this method shuts + down the preview display which generally results in the underlying + display becoming visible again. If a preview is not currently running, + no exception is raised - the method will simply do nothing. + """ + self._check_camera_open() + self._preview.close() + self._preview = PiNullSink( + self, self._camera.outputs[self.CAMERA_PREVIEW_PORT]) + + def add_overlay(self, source, size=None, format=None, **options): + """ + Adds a static overlay to the preview output. 
+ + This method creates a new static overlay using the same rendering + mechanism as the preview. Overlays will appear on the Pi's video + output, but will not appear in captures or video recordings. Multiple + overlays can exist; each call to :meth:`add_overlay` returns a new + :class:`PiOverlayRenderer` instance representing the overlay. + + The *source* must be an object that supports the :ref:`buffer protocol + ` in one of the supported unencoded formats: ``'yuv'``, + ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can + specified explicitly with the optional *format* parameter. If not + specified, the method will attempt to guess the format based on the + length of *source* and the *size* (assuming 3 bytes per pixel for RGB, + and 4 bytes for RGBA). + + The optional *size* parameter specifies the size of the source image as + a ``(width, height)`` tuple. If this is omitted or ``None`` then the + size is assumed to be the same as the camera's current + :attr:`resolution`. + + The length of *source* must take into account that widths are rounded + up to the nearest multiple of 32, and heights to the nearest multiple + of 16. For example, if *size* is ``(1280, 720)``, and *format* is + ``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3 + bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is + a multiple of 16 no extra rounding is required). However, if *size* is + ``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer + with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column + 97 and row 57 in the source will be ignored). + + New overlays default to *layer* 0, whilst the preview defaults to layer + 2. Higher numbered layers obscure lower numbered layers, hence new + overlays will be invisible (if the preview is running) by default. You + can make the new overlay visible either by making any existing preview + transparent (with the :attr:`~PiRenderer.alpha` property) or by moving + the overlay into a layer higher than the preview (with the + :attr:`~PiRenderer.layer` property). + + All keyword arguments captured in *options* are passed onto the + :class:`PiRenderer` constructor. All camera properties except + :attr:`resolution` and :attr:`framerate` can be modified while overlays + exist. The reason for these exceptions is that the overlay has a static + resolution and changing the camera's mode would require resizing of the + source. + + .. warning:: + + If too many overlays are added, the display output will be disabled + and a reboot will generally be required to restore the display. + Overlays are composited "on the fly". Hence, a real-time constraint + exists wherein for each horizontal line of HDMI output, the content + of all source layers must be fetched, resized, converted, and + blended to produce the output pixels. + + If enough overlays exist (where "enough" is a number dependent on + overlay size, display resolution, bus frequency, and several other + factors making it unrealistic to calculate in advance), this + process breaks down and video output fails. One solution is to add + ``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of + an off-screen buffer. Be aware that this requires more GPU memory + and may reduce the update rate. + + .. _RGB: https://en.wikipedia.org/wiki/RGB + .. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space + + .. versionadded:: 1.8 + + .. 
versionchanged:: 1.13 + Added *format* parameter + """ + self._check_camera_open() + renderer = PiOverlayRenderer(self, source, size, format, **options) + self._overlays.append(renderer) + return renderer + + def remove_overlay(self, overlay): + """ + Removes a static overlay from the preview output. + + This method removes an overlay which was previously created by + :meth:`add_overlay`. The *overlay* parameter specifies the + :class:`PiRenderer` instance that was returned by :meth:`add_overlay`. + + .. versionadded:: 1.8 + """ + if not overlay in self._overlays: + raise PiCameraValueError( + "The specified overlay is not owned by this instance of " + "PiCamera") + overlay.close() + self._overlays.remove(overlay) + + def start_recording( + self, output, format=None, resize=None, splitter_port=1, **options): + """ + Start recording video from the camera, storing it in *output*. + + If *output* is a string, it will be treated as a filename for a new + file which the video will be written to. If *output* is not a string, + but is an object with a ``write`` method, it is assumed to be a + file-like object and the video data is appended to it (the + implementation only assumes the object has a ``write()`` method - no + other methods are required but ``flush`` will be called at the end of + recording if it is present). If *output* is not a string, and has no + ``write`` method it is assumed to be a writeable object implementing + the buffer protocol. In this case, the video frames will be written + sequentially to the underlying buffer (which must be large enough to + accept all frame data). + + If *format* is ``None`` (the default), the method will attempt to guess + the required video format from the extension of *output* (if it's a + string), or from the *name* attribute of *output* (if it has one). In + the case that the format cannot be determined, a + :exc:`PiCameraValueError` will be raised. + + If *format* is not ``None``, it must be a string specifying the format + that you want the video output in. The format can be a MIME-type or + one of the following strings: + + * ``'h264'`` - Write an H.264 video stream + * ``'mjpeg'`` - Write an M-JPEG video stream + * ``'yuv'`` - Write the raw video data to a file in YUV420 format + * ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format + * ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format + * ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format + * ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format + + If *resize* is not ``None`` (the default), it must be a two-element + tuple specifying the width and height that the video recording should + be resized to. This is particularly useful for recording video using + the full resolution of the camera sensor (which is not possible in + H.264 without down-sizing the output). + + The *splitter_port* parameter specifies the port of the built-in + splitter that the video encoder will be attached to. This defaults to + ``1`` and most users will have no need to specify anything different. + If you wish to record multiple (presumably resized) streams + simultaneously, specify a value between ``0`` and ``3`` inclusive for + this parameter, ensuring that you do not specify a port that is + currently in use. + + Certain formats accept additional options which can be specified + as keyword arguments. The ``'h264'`` format accepts the following + additional options: + + * *profile* - The H.264 profile to use for encoding. 
Defaults to + 'high', but can be one of 'baseline', 'main', 'high', or + 'constrained'. + + * *level* - The `H.264 level`_ to use for encoding. Defaults to '4', + but can be any H.264 level up to '4.2'. + + * *intra_period* - The key frame rate (the rate at which I-frames are + inserted in the output). Defaults to ``None``, but can be any 32-bit + integer value representing the number of frames between successive + I-frames. The special value 0 causes the encoder to produce a single + initial I-frame, and then only P-frames subsequently. Note that + :meth:`split_recording` will fail in this mode. + + * *intra_refresh* - The key frame format (the way in which I-frames + will be inserted into the output stream). Defaults to ``None``, but + can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'. + + * *inline_headers* - When ``True``, specifies that the encoder should + output SPS/PPS headers within the stream to ensure GOPs (groups of + pictures) are self describing. This is important for streaming + applications where the client may wish to seek within the stream, and + enables the use of :meth:`split_recording`. Defaults to ``True`` if + not specified. + + * *sei* - When ``True``, specifies the encoder should include + "Supplemental Enhancement Information" within the output stream. + Defaults to ``False`` if not specified. + + * *sps_timing* - When ``True`` the encoder includes the camera's + framerate in the SPS header. Defaults to ``False`` if not specified. + + * *motion_output* - Indicates the output destination for motion vector + estimation data. When ``None`` (the default), motion data is not + output. Otherwise, this can be a filename string, a file-like object, + or a writeable buffer object (as with the *output* parameter). + + All encoded formats accept the following additional options: + + * *bitrate* - The bitrate at which video will be encoded. Defaults to + 17000000 (17Mbps) if not specified. The maximum value depends on the + selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder + should not use bitrate control (the encoder is limited by the quality + only). + + * *quality* - Specifies the quality that the encoder should attempt + to maintain. For the ``'h264'`` format, use values between 10 and 40 + where 10 is extremely high quality, and 40 is extremely low (20-25 is + usually a reasonable range for H.264 encoding). For the ``mjpeg`` + format, the quality is ignored (it can be specified without error + though); *bitrate* alone controls the quality of the output. + + * *quantization* - Deprecated alias for *quality*. + + .. versionchanged:: 1.0 + The *resize* parameter was added, and ``'mjpeg'`` was added as a + recording format + + .. versionchanged:: 1.3 + The *splitter_port* parameter was added + + .. versionchanged:: 1.5 + The *quantization* parameter was deprecated in favor of *quality*, + and the *motion_output* parameter was added. + + .. versionchanged:: 1.11 + Support for buffer outputs was added. + + .. 
_H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels + """ + if 'quantization' in options: + warnings.warn( + PiCameraDeprecated( + 'The quantization option is deprecated; please use ' + 'quality instead (same value)')) + with self._encoders_lock: + camera_port, output_port = self._get_ports(True, splitter_port) + format = self._get_video_format(output, format) + encoder = self._get_video_encoder( + camera_port, output_port, format, resize, **options) + self._encoders[splitter_port] = encoder + try: + encoder.start(output, options.get('motion_output')) + except Exception as e: + encoder.close() + with self._encoders_lock: + del self._encoders[splitter_port] + raise + + def split_recording(self, output, splitter_port=1, **options): + """ + Continue the recording in the specified output; close existing output. + + When called, the video encoder will wait for the next appropriate + split point (an inline SPS header), then will cease writing to the + current output (and close it, if it was specified as a filename), and + continue writing to the newly specified *output*. + + The *output* parameter is treated as in the :meth:`start_recording` + method (it can be a string, a file-like object, or a writeable + buffer object). + + The *motion_output* parameter can be used to redirect the output of the + motion vector data in the same fashion as *output*. If *motion_output* + is ``None`` (the default) then motion vector data will not be + redirected and will continue being written to the output specified by + the *motion_output* parameter given to :meth:`start_recording`. + Alternatively, if you only wish to redirect motion vector data, you can + set *output* to ``None`` and given a new value for *motion_output*. + + The *splitter_port* parameter specifies which port of the video + splitter the encoder you wish to change outputs is attached to. This + defaults to ``1`` and most users will have no need to specify anything + different. Valid values are between ``0`` and ``3`` inclusive. + + Note that unlike :meth:`start_recording`, you cannot specify format or + other options as these cannot be changed in the middle of recording. + Only the new *output* (and *motion_output*) can be specified. + Furthermore, the format of the recording is currently limited to H264, + and *inline_headers* must be ``True`` when :meth:`start_recording` is + called (this is the default). + + The method returns the meta-data of the first :class:`PiVideoFrame` + that is written to the new output. + + .. versionchanged:: 1.3 + The *splitter_port* parameter was added + + .. versionchanged:: 1.5 + The *motion_output* parameter was added + + .. versionchanged:: 1.11 + Support for buffer outputs was added. + """ + try: + with self._encoders_lock: + encoder = self._encoders[splitter_port] + except KeyError: + raise PiCameraNotRecording( + 'There is no recording in progress on ' + 'port %d' % splitter_port) + else: + return encoder.split(output, options.get('motion_output')) + + def request_key_frame(self, splitter_port=1): + """ + Request the encoder generate a key-frame as soon as possible. + + When called, the video encoder running on the specified *splitter_port* + will attempt to produce a key-frame (full-image frame) as soon as + possible. The *splitter_port* defaults to ``1``. Valid values are + between ``0`` and ``3`` inclusive. + + .. note:: + + This method is only meaningful for recordings encoded in the H264 + format as MJPEG produces full frames for every frame recorded. 
+ Furthermore, there's no guarantee that the *next* frame will be + a key-frame; this is simply a request to produce one as soon as + possible after the call. + + .. versionadded:: 1.11 + """ + try: + with self._encoders_lock: + encoder = self._encoders[splitter_port] + except KeyError: + raise PiCameraNotRecording( + 'There is no recording in progress on ' + 'port %d' % splitter_port) + else: + encoder.request_key_frame() + + def wait_recording(self, timeout=0, splitter_port=1): + """ + Wait on the video encoder for timeout seconds. + + It is recommended that this method is called while recording to check + for exceptions. If an error occurs during recording (for example out of + disk space) the recording will stop, but an exception will only be + raised when the :meth:`wait_recording` or :meth:`stop_recording` + methods are called. + + If ``timeout`` is 0 (the default) the function will immediately return + (or raise an exception if an error has occurred). + + The *splitter_port* parameter specifies which port of the video + splitter the encoder you wish to wait on is attached to. This + defaults to ``1`` and most users will have no need to specify anything + different. Valid values are between ``0`` and ``3`` inclusive. + + .. versionchanged:: 1.3 + The *splitter_port* parameter was added + """ + assert timeout is not None + try: + with self._encoders_lock: + encoder = self._encoders[splitter_port] + except KeyError: + raise PiCameraNotRecording( + 'There is no recording in progress on ' + 'port %d' % splitter_port) + else: + encoder.wait(timeout) + + def stop_recording(self, splitter_port=1): + """ + Stop recording video from the camera. + + After calling this method the video encoder will be shut down and + output will stop being written to the file-like object specified with + :meth:`start_recording`. If an error occurred during recording and + :meth:`wait_recording` has not been called since the error then this + method will raise the exception. + + The *splitter_port* parameter specifies which port of the video + splitter the encoder you wish to stop is attached to. This defaults to + ``1`` and most users will have no need to specify anything different. + Valid values are between ``0`` and ``3`` inclusive. + + .. versionchanged:: 1.3 + The *splitter_port* parameter was added + """ + try: + with self._encoders_lock: + encoder = self._encoders[splitter_port] + except KeyError: + raise PiCameraNotRecording( + 'There is no recording in progress on ' + 'port %d' % splitter_port) + else: + try: + self.wait_recording(0, splitter_port) + finally: + encoder.close() + with self._encoders_lock: + del self._encoders[splitter_port] + + def record_sequence( + self, outputs, format='h264', resize=None, splitter_port=1, **options): + """ + Record a sequence of video clips from the camera. + + This method accepts a sequence or iterator of *outputs* each of which + must either be a string specifying a filename for output, or a + file-like object with a ``write`` method. + + The method acts as an iterator itself, yielding each item of the + sequence in turn. In this way, the caller can control how long to + record to each item by only permitting the loop to continue when ready + to switch to the next output. + + The *format*, *splitter_port*, *resize*, and *options* parameters are + the same as in :meth:`start_recording`, but *format* defaults to + ``'h264'``. The format is **not** derived from the filenames in + *outputs* by this method. 
+ + For example, to record 3 consecutive 10-second video clips, writing the + output to a series of H.264 files named clip01.h264, clip02.h264, and + clip03.h264 one could use the following:: + + import picamera + with picamera.PiCamera() as camera: + for filename in camera.record_sequence([ + 'clip01.h264', + 'clip02.h264', + 'clip03.h264']): + print('Recording to %s' % filename) + camera.wait_recording(10) + + Alternatively, a more flexible method of writing the previous example + (which is easier to expand to a large number of output files) is by + using a generator expression as the input sequence:: + + import picamera + with picamera.PiCamera() as camera: + for filename in camera.record_sequence( + 'clip%02d.h264' % i for i in range(3)): + print('Recording to %s' % filename) + camera.wait_recording(10) + + More advanced techniques are also possible by utilising infinite + sequences, such as those generated by :func:`itertools.cycle`. In the + following example, recording is switched between two in-memory streams. + Whilst one stream is recording, the other is being analysed. The script + only stops recording when a video recording meets some criteria defined + by the ``process`` function:: + + import io + import itertools + import picamera + with picamera.PiCamera() as camera: + analyse = None + for stream in camera.record_sequence( + itertools.cycle((io.BytesIO(), io.BytesIO()))): + if analyse is not None: + if process(analyse): + break + analyse.seek(0) + analyse.truncate() + camera.wait_recording(5) + analyse = stream + + .. versionadded:: 1.3 + """ + with self._encoders_lock: + camera_port, output_port = self._get_ports(True, splitter_port) + format = self._get_video_format('', format) + encoder = self._get_video_encoder( + camera_port, output_port, format, resize, **options) + self._encoders[splitter_port] = encoder + try: + start = True + for output in outputs: + if start: + start = False + encoder.start(output, options.get('motion_output')) + else: + encoder.split(output) + yield output + finally: + try: + encoder.wait(0) + finally: + encoder.close() + with self._encoders_lock: + del self._encoders[splitter_port] + + def capture( + self, output, format=None, use_video_port=False, resize=None, + splitter_port=0, bayer=False, **options): + """ + Capture an image from the camera, storing it in *output*. + + If *output* is a string, it will be treated as a filename for a new + file which the image will be written to. If *output* is not a string, + but is an object with a ``write`` method, it is assumed to be a + file-like object and the image data is appended to it (the + implementation only assumes the object has a ``write`` method - no + other methods are required but ``flush`` will be called at the end of + capture if it is present). If *output* is not a string, and has no + ``write`` method it is assumed to be a writeable object implementing + the buffer protocol. In this case, the image data will be written + directly to the underlying buffer (which must be large enough to accept + the image data). + + If *format* is ``None`` (the default), the method will attempt to guess + the required image format from the extension of *output* (if it's a + string), or from the *name* attribute of *output* (if it has one). In + the case that the format cannot be determined, a + :exc:`PiCameraValueError` will be raised. + + If *format* is not ``None``, it must be a string specifying the format + that you want the image output in. 
The format can be a MIME-type or + one of the following strings: + + * ``'jpeg'`` - Write a JPEG file + * ``'png'`` - Write a PNG file + * ``'gif'`` - Write a GIF file + * ``'bmp'`` - Write a Windows bitmap file + * ``'yuv'`` - Write the raw image data to a file in YUV420 format + * ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format + * ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format + * ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format + * ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format + * ``'raw'`` - Deprecated option for raw captures; the format is taken + from the deprecated :attr:`raw_format` attribute + + The *use_video_port* parameter controls whether the camera's image or + video port is used to capture images. It defaults to ``False`` which + means that the camera's image port is used. This port is slow but + produces better quality pictures. If you need rapid capture up to the + rate of video frames, set this to ``True``. + + When *use_video_port* is ``True``, the *splitter_port* parameter + specifies the port of the video splitter that the image encoder will be + attached to. This defaults to ``0`` and most users will have no need to + specify anything different. This parameter is ignored when + *use_video_port* is ``False``. See :ref:`mmal` for more information + about the video splitter. + + If *resize* is not ``None`` (the default), it must be a two-element + tuple specifying the width and height that the image should be resized + to. + + .. warning:: + + If *resize* is specified, or *use_video_port* is ``True``, Exif + metadata will **not** be included in JPEG output. This is due to an + underlying firmware limitation. + + Certain file formats accept additional options which can be specified + as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts + additional options, which are: + + * *quality* - Defines the quality of the JPEG encoder as an integer + ranging from 1 to 100. Defaults to 85. Please note that JPEG quality + is not a percentage and `definitions of quality`_ vary widely. + + * *restart* - Defines the restart interval for the JPEG encoder as a + number of JPEG MCUs. The actual restart interval used will be a + multiple of the number of MCUs per row in the resulting image. + + * *thumbnail* - Defines the size and quality of the thumbnail to embed + in the Exif metadata. Specifying ``None`` disables thumbnail + generation. Otherwise, specify a tuple of ``(width, height, + quality)``. Defaults to ``(64, 48, 35)``. + + * *bayer* - If ``True``, the raw bayer data from the camera's sensor + is included in the Exif metadata. + + .. note:: + + The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``, + etc.) do not represent the raw bayer data from the camera's sensor. + Rather they provide access to the image data after GPU processing, + but before format encoding (JPEG, PNG, etc). Currently, the only + method of accessing the raw bayer data is via the *bayer* parameter + described above. + + .. versionchanged:: 1.0 + The *resize* parameter was added, and raw capture formats can now + be specified directly + + .. versionchanged:: 1.3 + The *splitter_port* parameter was added, and *bayer* was added as + an option for the ``'jpeg'`` format + + .. versionchanged:: 1.11 + Support for buffer outputs was added. + + .. 
_definitions of quality: http://photo.net/learn/jpeg/#qual + """ + if format == 'raw': + warnings.warn( + PiCameraDeprecated( + 'The "raw" format option is deprecated; specify the ' + 'required format directly instead ("yuv", "rgb", etc.)')) + if use_video_port and bayer: + raise PiCameraValueError( + 'bayer is only valid with still port captures') + if 'burst' in options: + raise PiCameraValueError( + 'burst is only valid with capture_sequence or capture_continuous') + with self._encoders_lock: + camera_port, output_port = self._get_ports(use_video_port, splitter_port) + format = self._get_image_format(output, format) + encoder = self._get_image_encoder( + camera_port, output_port, format, resize, **options) + if use_video_port: + self._encoders[splitter_port] = encoder + try: + if bayer: + camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True + encoder.start(output) + # Wait for the callback to set the event indicating the end of + # image capture + if not encoder.wait(self.CAPTURE_TIMEOUT): + raise PiCameraRuntimeError( + 'Timed out waiting for capture to end') + finally: + encoder.close() + with self._encoders_lock: + if use_video_port: + del self._encoders[splitter_port] + + def capture_sequence( + self, outputs, format='jpeg', use_video_port=False, resize=None, + splitter_port=0, burst=False, bayer=False, **options): + """ + Capture a sequence of consecutive images from the camera. + + This method accepts a sequence or iterator of *outputs* each of which + must either be a string specifying a filename for output, or a + file-like object with a ``write`` method, or a writeable buffer object. + For each item in the sequence or iterator of outputs, the camera + captures a single image as fast as it can. + + The *format*, *use_video_port*, *splitter_port*, *resize*, and + *options* parameters are the same as in :meth:`capture`, but *format* + defaults to ``'jpeg'``. The format is **not** derived from the + filenames in *outputs* by this method. + + If *use_video_port* is ``False`` (the default), the *burst* parameter + can be used to make still port captures faster. Specifically, this + prevents the preview from switching resolutions between captures which + significantly speeds up consecutive captures from the still port. The + downside is that this mode is currently has several bugs; the major + issue is that if captures are performed too quickly some frames will + come back severely underexposed. It is recommended that users avoid the + *burst* parameter unless they absolutely require it and are prepared to + work around such issues. + + For example, to capture 3 consecutive images:: + + import time + import picamera + with picamera.PiCamera() as camera: + camera.start_preview() + time.sleep(2) + camera.capture_sequence([ + 'image1.jpg', + 'image2.jpg', + 'image3.jpg', + ]) + camera.stop_preview() + + If you wish to capture a large number of images, a list comprehension + or generator expression can be used to construct the list of filenames + to use:: + + import time + import picamera + with picamera.PiCamera() as camera: + camera.start_preview() + time.sleep(2) + camera.capture_sequence([ + 'image%02d.jpg' % i + for i in range(100) + ]) + camera.stop_preview() + + More complex effects can be obtained by using a generator function to + provide the filenames or output objects. + + .. versionchanged:: 1.0 + The *resize* parameter was added, and raw capture formats can now + be specified directly + + .. versionchanged:: 1.3 + The *splitter_port* parameter was added + + .. 
versionchanged:: 1.11 + Support for buffer outputs was added. + """ + if use_video_port: + if burst: + raise PiCameraValueError( + 'burst is only valid with still port captures') + if bayer: + raise PiCameraValueError( + 'bayer is only valid with still port captures') + with self._encoders_lock: + camera_port, output_port = self._get_ports(use_video_port, splitter_port) + format = self._get_image_format('', format) + if use_video_port: + encoder = self._get_images_encoder( + camera_port, output_port, format, resize, **options) + self._encoders[splitter_port] = encoder + else: + encoder = self._get_image_encoder( + camera_port, output_port, format, resize, **options) + try: + if use_video_port: + encoder.start(outputs) + encoder.wait() + else: + if burst: + camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True + try: + for output in outputs: + if bayer: + camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True + encoder.start(output) + if not encoder.wait(self.CAPTURE_TIMEOUT): + raise PiCameraRuntimeError( + 'Timed out waiting for capture to end') + finally: + if burst: + camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False + finally: + encoder.close() + with self._encoders_lock: + if use_video_port: + del self._encoders[splitter_port] + + def capture_continuous( + self, output, format=None, use_video_port=False, resize=None, + splitter_port=0, burst=False, bayer=False, **options): + """ + Capture images continuously from the camera as an infinite iterator. + + This method returns an infinite iterator of images captured + continuously from the camera. If *output* is a string, each captured + image is stored in a file named after *output* after substitution of + two values with the :meth:`~str.format` method. Those two values are: + + * ``{counter}`` - a simple incrementor that starts at 1 and increases + by 1 for each image taken + + * ``{timestamp}`` - a :class:`~datetime.datetime` instance + + The table below contains several example values of *output* and the + sequence of filenames those values could produce: + + .. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}| + + +--------------------------------------------+--------------------------------------------+-------+ + | *output* Value | Filenames | Notes | + +============================================+============================================+=======+ + | ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | | + +--------------------------------------------+--------------------------------------------+-------+ + | ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | | + +--------------------------------------------+--------------------------------------------+-------+ + | ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) | + | | image2013-10-05 12:07:32.498539, ... | | + +--------------------------------------------+--------------------------------------------+-------+ + | ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | | + | | image12-10-14-905398.jpg | | + +--------------------------------------------+--------------------------------------------+-------+ + | ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) | + | | 121014-003.jpg, ... | | + +--------------------------------------------+--------------------------------------------+-------+ + + 1. Note that because timestamp's default output includes colons (:), + the resulting filenames are not suitable for use on Windows. 
For + this reason (and the fact the default contains spaces) it is + strongly recommended you always specify a format when using + ``{timestamp}``. + + 2. You can use both ``{timestamp}`` and ``{counter}`` in a single + format string (multiple times too!) although this tends to be + redundant. + + If *output* is not a string, but has a ``write`` method, it is assumed + to be a file-like object and each image is simply written to this + object sequentially. In this case you will likely either want to write + something to the object between the images to distinguish them, or + clear the object between iterations. If *output* is not a string, and + has no ``write`` method, it is assumed to be a writeable object + supporting the buffer protocol; each image is simply written to the + buffer sequentially. + + The *format*, *use_video_port*, *splitter_port*, *resize*, and + *options* parameters are the same as in :meth:`capture`. + + If *use_video_port* is ``False`` (the default), the *burst* parameter + can be used to make still port captures faster. Specifically, this + prevents the preview from switching resolutions between captures which + significantly speeds up consecutive captures from the still port. The + downside is that this mode is currently has several bugs; the major + issue is that if captures are performed too quickly some frames will + come back severely underexposed. It is recommended that users avoid the + *burst* parameter unless they absolutely require it and are prepared to + work around such issues. + + For example, to capture 60 images with a one second delay between them, + writing the output to a series of JPEG files named image01.jpg, + image02.jpg, etc. one could do the following:: + + import time + import picamera + with picamera.PiCamera() as camera: + camera.start_preview() + try: + for i, filename in enumerate( + camera.capture_continuous('image{counter:02d}.jpg')): + print(filename) + time.sleep(1) + if i == 59: + break + finally: + camera.stop_preview() + + Alternatively, to capture JPEG frames as fast as possible into an + in-memory stream, performing some processing on each stream until + some condition is satisfied:: + + import io + import time + import picamera + with picamera.PiCamera() as camera: + stream = io.BytesIO() + for foo in camera.capture_continuous(stream, format='jpeg'): + # Truncate the stream to the current position (in case + # prior iterations output a longer image) + stream.truncate() + stream.seek(0) + if process(stream): + break + + .. versionchanged:: 1.0 + The *resize* parameter was added, and raw capture formats can now + be specified directly + + .. versionchanged:: 1.3 + The *splitter_port* parameter was added + + .. versionchanged:: 1.11 + Support for buffer outputs was added. + """ + if use_video_port: + if burst: + raise PiCameraValueError( + 'burst is only valid with still port captures') + if bayer: + raise PiCameraValueError( + 'bayer is only valid with still port captures') + with self._encoders_lock: + camera_port, output_port = self._get_ports(use_video_port, splitter_port) + format = self._get_image_format(output, format) + encoder = self._get_image_encoder( + camera_port, output_port, format, resize, **options) + if use_video_port: + self._encoders[splitter_port] = encoder + try: + if burst: + camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True + try: + if isinstance(output, bytes): + # If we're fed a bytes string, assume it's UTF-8 encoded + # and convert it to Unicode. 
Technically this is wrong + # (file-systems use all sorts of encodings), but UTF-8 is a + # reasonable default and this keeps compatibility with + # Python 2 simple although it breaks the edge cases of + # non-UTF-8 encoded bytes strings with non-UTF-8 encoded + # file-systems + output = output.decode('utf-8') + if isinstance(output, str): + counter = 1 + while True: + filename = output.format( + counter=counter, + timestamp=datetime.datetime.now(), + ) + if bayer: + camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True + encoder.start(filename) + if not encoder.wait(self.CAPTURE_TIMEOUT): + raise PiCameraRuntimeError( + 'Timed out waiting for capture to end') + yield filename + counter += 1 + else: + while True: + if bayer: + camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True + encoder.start(output) + if not encoder.wait(self.CAPTURE_TIMEOUT): + raise PiCameraRuntimeError( + 'Timed out waiting for capture to end') + yield output + finally: + if burst: + camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False + finally: + encoder.close() + with self._encoders_lock: + if use_video_port: + del self._encoders[splitter_port] + + @property + def closed(self): + """ + Returns ``True`` if the :meth:`close` method has been called. + """ + return not self._camera + + @property + def recording(self): + """ + Returns ``True`` if the :meth:`start_recording` method has been called, + and no :meth:`stop_recording` call has been made yet. + """ + return any( + isinstance(e, PiVideoEncoder) and e.active + for e in self._encoders.values() + ) + + @property + def previewing(self): + """ + Returns ``True`` if the :meth:`start_preview` method has been called, + and no :meth:`stop_preview` call has been made yet. + + .. deprecated:: 1.8 + Test whether :attr:`preview` is ``None`` instead. + """ + warnings.warn( + PiCameraDeprecated( + 'PiCamera.previewing is deprecated; test PiCamera.preview ' + 'is not None instead')) + return isinstance(self._preview, PiPreviewRenderer) + + @property + def revision(self): + """ + Returns a string representing the revision of the Pi's camera module. + At the time of writing, the string returned is 'ov5647' for the V1 + module, and 'imx219' for the V2 module. + """ + return self._revision + + @property + def exif_tags(self): + """ + Holds a mapping of the Exif tags to apply to captured images. + + .. note:: + + Please note that Exif tagging is only supported with the ``jpeg`` + format. + + By default several Exif tags are automatically applied to any images + taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to + ``RaspberryPi``), ``IFD0.Model`` (which is set to the camera's revision + string), and three timestamp tags: ``IFD0.DateTime``, + ``EXIF.DateTimeOriginal``, and ``EXIF.DateTimeDigitized`` which are all + set to the current date and time just before the picture is taken. + + If you wish to set additional Exif tags, or override any of the + aforementioned tags, simply add entries to the exif_tags map before + calling :meth:`capture`. For example:: + + camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries' + + The Exif standard mandates ASCII encoding for all textual values, hence + strings containing non-ASCII characters will cause an encoding error to + be raised when :meth:`capture` is called. If you wish to set binary + values, use a :func:`bytes` value:: + + camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters' + + .. 
warning:: + + Binary Exif values are currently ignored; this appears to be a + libmmal or firmware bug. + + You may also specify datetime values, integer, or float values, all of + which will be converted to appropriate ASCII strings (datetime values + are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif + standard). + + The currently supported Exif tags are: + + +-------+-------------------------------------------------------------+ + | Group | Tags | + +=======+=============================================================+ + | IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, | + | IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, | + | | StripOffsets, Orientation, SamplesPerPixel, RowsPerString, | + | | StripByteCounts, Xresolution, Yresolution, | + | | PlanarConfiguration, ResolutionUnit, TransferFunction, | + | | Software, DateTime, Artist, WhitePoint, | + | | PrimaryChromaticities, JPEGInterchangeFormat, | + | | JPEGInterchangeFormatLength, YcbCrCoefficients, | + | | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, | + | | Copyright | + +-------+-------------------------------------------------------------+ + | EXIF | ExposureTime, FNumber, ExposureProgram, | + | | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, | + | | DateTimeOriginal, DateTimeDigitized, | + | | ComponentsConfiguration, CompressedBitsPerPixel, | + | | ShutterSpeedValue, ApertureValue, BrightnessValue, | + | | ExposureBiasValue, MaxApertureValue, SubjectDistance, | + | | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, | + | | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, | + | | SubSecTimeDigitized, FlashpixVersion, ColorSpace, | + | | PixelXDimension, PixelYDimension, RelatedSoundFile, | + | | FlashEnergy, SpacialFrequencyResponse, | + | | FocalPlaneXResolution, FocalPlaneYResolution, | + | | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, | + | | SensingMethod, FileSource, SceneType, CFAPattern, | + | | CustomRendered, ExposureMode, WhiteBalance, | + | | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, | + | | GainControl, Contrast, Saturation, Sharpness, | + | | DeviceSettingDescription, SubjectDistanceRange, | + | | ImageUniqueID | + +-------+-------------------------------------------------------------+ + | GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, | + | | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, | + | | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, | + | | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, | + | | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, | + | | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, | + | | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, | + | | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, | + | | GPSAreaInformation, GPSDateStamp, GPSDifferential | + +-------+-------------------------------------------------------------+ + | EINT | InteroperabilityIndex, InteroperabilityVersion, | + | | RelatedImageFileFormat, RelatedImageWidth, | + | | RelatedImageLength | + +-------+-------------------------------------------------------------+ + """ + return self._exif_tags + + def _set_led(self, value): + if not self._used_led: + global GPIO + if GPIO: + try: + GPIO.setmode(GPIO.BCM) + GPIO.setwarnings(False) + GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW) + self._used_led = True + except RuntimeError: + # We're probably not running as root. 
In this case, forget the + # GPIO reference so we don't try anything further + GPIO = None + + if not GPIO: + raise PiCameraRuntimeError( + "GPIO library not found, or not accessible; please install " + "RPi.GPIO or run the script as root") + GPIO.output(self._led_pin, bool(value)) + led = property(None, _set_led, doc=""" + Sets the state of the camera's LED via GPIO. + + If a GPIO library is available (only RPi.GPIO is currently supported), + and if the python process has the necessary privileges (typically this + means running as root via sudo), this property can be used to set the + state of the camera's LED as a boolean value (``True`` is on, ``False`` + is off). + + .. note:: + + This is a write-only property. While it can be used to control the + camera's LED, you cannot query the state of the camera's LED using + this property. + + .. note:: + + At present, the camera's LED cannot be controlled on the Pi 3 or 3+ + (the GPIOs used to control the camera LED were re-routed to GPIO + expander on these models). + + .. warning:: + + There are circumstances in which the camera firmware may override + an existing LED setting. For example, in the case that the firmware + resets the camera (as can happen with a CSI-2 timeout), the LED may + also be reset. If you wish to guarantee that the LED remain off at + all times, you may prefer to use the ``disable_camera_led`` option + in `config.txt`_ (this has the added advantage that GPIO access is + not required, at least for LED control). + + .. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md + """) + + def _get_raw_format(self): + warnings.warn( + PiCameraDeprecated( + 'PiCamera.raw_format is deprecated; use required format ' + 'directly with capture methods instead')) + return self._raw_format + def _set_raw_format(self, value): + warnings.warn( + PiCameraDeprecated( + 'PiCamera.raw_format is deprecated; use required format ' + 'directly with capture methods instead')) + if value not in self.RAW_FORMATS: + raise PiCameraValueError("Invalid raw format: %s" % value) + self._raw_format = value + raw_format = property(_get_raw_format, _set_raw_format, doc=""" + Retrieves or sets the raw format of the camera's ports. + + .. deprecated:: 1.0 + Please use ``'yuv'`` or ``'rgb'`` directly as a format in the + various capture methods instead. + """) + + def _get_timestamp(self): + self._check_camera_open() + return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME] + timestamp = property(_get_timestamp, doc=""" + Retrieves the system time according to the camera firmware. + + The camera's timestamp is a 64-bit integer representing the number of + microseconds since the last system boot. When the camera's + :attr:`clock_mode` is ``'raw'`` the values returned by this attribute + are comparable to those from the :attr:`frame` + :attr:`~PiVideoFrame.timestamp` attribute. + """) + + def _get_frame(self): + self._check_camera_open() + for e in self._encoders.values(): + try: + return e.frame + except AttributeError: + pass + raise PiCameraRuntimeError( + "Cannot query frame information when camera is not recording") + frame = property(_get_frame, doc=""" + Retrieves information about the current frame recorded from the camera. + + When video recording is active (after a call to + :meth:`start_recording`), this attribute will return a + :class:`PiVideoFrame` tuple containing information about the current + frame that the camera is recording. 
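+
+        For example, the following sketch (the filename ``'video.h264'`` is
+        merely illustrative) prints the index and timestamp of the most
+        recently encoded frame once per second during a short recording::
+
+            import picamera
+            with picamera.PiCamera() as camera:
+                # 'video.h264' is just an example output name
+                camera.start_recording('video.h264')
+                for i in range(10):
+                    camera.wait_recording(1)
+                    # frame may be None briefly after start_recording
+                    if camera.frame is not None:
+                        print(camera.frame.index, camera.frame.timestamp)
+                camera.stop_recording()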
+ + If multiple video recordings are currently in progress (after multiple + calls to :meth:`start_recording` with different values for the + ``splitter_port`` parameter), which encoder's frame information is + returned is arbitrary. If you require information from a specific + encoder, you will need to extract it from :attr:`_encoders` explicitly. + + Querying this property when the camera is not recording will result in + an exception. + + .. note:: + + There is a small window of time when querying this attribute will + return ``None`` after calling :meth:`start_recording`. If this + attribute returns ``None``, this means that the video encoder has + been initialized, but the camera has not yet returned any frames. + """) + + def _disable_camera(self): + """ + An internal method for disabling the camera, e.g. for re-configuration. + This disables the splitter and preview connections (if they exist). + """ + self._splitter.connection.disable() + self._preview.renderer.connection.disable() + self._camera.disable() + + def _enable_camera(self): + """ + An internal method for enabling the camera after re-configuration. + This ensures the splitter configuration is consistent, then re-enables + the camera along with the splitter and preview connections. + """ + self._camera.enable() + self._preview.renderer.connection.enable() + self._splitter.connection.enable() + + def _configure_splitter(self): + """ + Ensures the splitter has the same format as the attached camera + output port (the video port). + + This method is used to ensure the splitter configuration is sane, + typically after :meth:`_configure_camera` is called. + """ + self._splitter.inputs[0].copy_from( + self._camera.outputs[self.CAMERA_VIDEO_PORT]) + self._splitter.inputs[0].commit() + + def _control_callback(self, port, buf): + try: + if buf.command == mmal.MMAL_EVENT_ERROR: + raise PiCameraRuntimeError( + "No data received from sensor. Check all connections, " + "including the SUNNY chip on the camera board") + elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED: + raise PiCameraRuntimeError( + "Received unexpected camera control callback event, " + "0x%08x" % buf[0].cmd) + except Exception as exc: + # Pass the exception to the main thread; next time + # check_camera_open() is called this will get raised + self._camera_exception = exc + + def _get_config(self): + """ + An internal method for obtaining configuration data to pass to the + :meth:`_configure_camera` method. This is a namedtuple consisting of + all configuration that cannot be changed while the camera is active. + """ + port_num = ( + self.CAMERA_VIDEO_PORT + if self._encoders else + self.CAMERA_PREVIEW_PORT + ) + framerate = Fraction(self._camera.outputs[port_num].framerate) + if framerate == 0: + mp = self._camera.outputs[port_num].params[ + mmal.MMAL_PARAMETER_FPS_RANGE] + framerate = mo.PiFramerateRange(mp.fps_low, mp.fps_high) + return PiCameraConfig( + sensor_mode=self._camera.control.params[ + mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG], + clock_mode=self._camera_config.use_stc_timestamp, + resolution=mo.PiResolution( + int(self._camera_config.max_stills_w), + int(self._camera_config.max_stills_h) + ), + framerate=framerate, + isp_blocks=self._camera.control.params[ + mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE], + colorspace=self._camera.outputs[0].colorspace + ) + + def _configure_camera(self, old, new): + """ + An internal method for setting a new camera mode, framerate, + resolution, clock_mode, and/or ISP blocks. 
+ + This method is used by the setters of the :attr:`resolution`, + :attr:`framerate`, :attr:`framerate_range`, :attr:`sensor_mode`, and + :attr:`isp_blocks` properties. It assumes the camera is currently + disabled. + + The *old* and *new* arguments are :class:`PiCameraConfig` structures. + Both are required to ensure correct operation on older firmwares + (specifically that we don't try to set the sensor mode when both old + and new modes are 0 or automatic). + """ + old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy( + self._camera_config) + old_ports = [ + ( + port.framesize, + port.framerate, + port.params[mmal.MMAL_PARAMETER_FPS_RANGE] + ) + for port in self._camera.outputs + ] + if old.sensor_mode != 0 or new.sensor_mode != 0: + # Old firmware support: only attempt to set sensor mode when + # explicitly requested + self._camera.control.params[ + mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG + ] = new.sensor_mode + if not self._camera.control.enabled: + # One-time initial setup + self._camera.control.enable(self._control_callback) + preview_resolution = new.resolution + elif ( + self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize == + self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize + ): + preview_resolution = new.resolution + else: + preview_resolution = self._camera.outputs[ + self.CAMERA_PREVIEW_PORT].framesize + try: + # Old firmware support: only attempt to set ISP block override when + # explicitly requested + if ( + old.isp_blocks not in (0, 0xFFFFFFFF) or + new.isp_blocks not in (0, 0xFFFFFFFF)): + self._camera.control.params[ + mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE + ] = new.isp_blocks + try: + fps_low, fps_high = new.framerate + except TypeError: + fps_low = fps_high = new.framerate + else: + new = new._replace(framerate=0) + fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_FPS_RANGE, + ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T) + ), + fps_low=mo.to_rational(fps_low), + fps_high=mo.to_rational(fps_high), + ) + + cc = self._camera_config + cc.max_stills_w = new.resolution.width + cc.max_stills_h = new.resolution.height + cc.stills_yuv422 = 0 + cc.one_shot_stills = 1 + cc.max_preview_video_w = new.resolution.width + cc.max_preview_video_h = new.resolution.height + cc.num_preview_video_frames = max(3, fps_high // 10) + cc.stills_capture_circular_buffer_height = 0 + cc.fast_preview_resume = 0 + cc.use_stc_timestamp = new.clock_mode + self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc + + # Clamp preview resolution to camera's resolution + if ( + preview_resolution.width > new.resolution.width or + preview_resolution.height > new.resolution.height + ): + preview_resolution = new.resolution + for port in self._camera.outputs: + port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range + if port.index == self.CAMERA_PREVIEW_PORT: + port.framesize = preview_resolution + else: + port.framesize = new.resolution + port.framerate = new.framerate + port.colorspace = new.colorspace + port.commit() + except: + # If anything goes wrong, restore original resolution and + # framerate otherwise the camera can be left in unusual states + # (camera config not matching ports, etc). 
+ self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc + self._camera_config = old_cc + for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports): + port.framesize = res + port.framerate = fps + port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range + port.commit() + raise + + def _get_framerate(self): + self._check_camera_open() + port_num = ( + self.CAMERA_VIDEO_PORT + if self._encoders else + self.CAMERA_PREVIEW_PORT + ) + return mo.PiCameraFraction(self._camera.outputs[port_num].framerate) + def _set_framerate(self, value): + self._check_camera_open() + self._check_recording_stopped() + value = mo.to_fraction(value, den_limit=256) + if not (0 < value <= self.MAX_FRAMERATE): + raise PiCameraValueError("Invalid framerate: %.2ffps" % value) + config = self._get_config() + self._disable_camera() + self._configure_camera(config, config._replace(framerate=value)) + self._configure_splitter() + self._enable_camera() + framerate = property(_get_framerate, _set_framerate, doc="""\ + Retrieves or sets the framerate at which video-port based image + captures, video recordings, and previews will run. + + When queried, the :attr:`framerate` property returns the rate at which + the camera's video and preview ports will operate as a + :class:`~fractions.Fraction` instance (which can be easily converted to + an :class:`int` or :class:`float`). If :attr:`framerate_range` has been + set, then :attr:`framerate` will be 0 which indicates that a dynamic + range of framerates is being used. + + .. note:: + + For backwards compatibility, a derivative of the + :class:`~fractions.Fraction` class is actually used which permits + the value to be treated as a tuple of ``(numerator, denominator)``. + + Setting and retrieving framerate as a ``(numerator, denominator)`` + tuple is deprecated and will be removed in 2.0. Please use a + :class:`~fractions.Fraction` instance instead (which is just as + accurate and also permits direct use with math operators). + + When set, the property configures the camera so that the next call to + recording and previewing methods will use the new framerate. Setting + this property implicitly sets :attr:`framerate_range` so that the low + and high values are equal to the new framerate. The framerate can be + specified as an :ref:`int `, :ref:`float `, + :class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple. + For example, the following definitions are all equivalent:: + + from fractions import Fraction + + camera.framerate = 30 + camera.framerate = 30 / 1 + camera.framerate = Fraction(30, 1) + camera.framerate = (30, 1) # deprecated + + The camera must not be closed, and no recording must be active when the + property is set. + + .. note:: + + This attribute, in combination with :attr:`resolution`, determines + the mode that the camera operates in. The actual sensor framerate + and resolution used by the camera is influenced, but not directly + set, by this property. See :attr:`sensor_mode` for more + information. + + The initial value of this property can be specified with the + *framerate* parameter in the :class:`PiCamera` constructor, and will + default to 30 if not specified. + """) + + @property + def sensor_modes(self): + """ + Returns a mapping describing the available sensor modes for the + camera's :attr:`revision`. + + This read-only attribute returns a dictionary mapping sensor mode + numbers (1..7) to instances of :class:`PiSensorMode` which contain the + resolution, range of framerates, and other details about the mode. 
+ Note that the default mode (0) is not represented, as this indicates + that the mode should be selected automatically by the firmware based + on the requested :attr:`resolution` and :attr:`framerate`. + + .. versionadded:: 1.14 + """ + return PiCamera.SENSOR_MODES[self.revision] + + def _get_sensor_mode(self): + self._check_camera_open() + return self._camera.control.params[ + mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] + def _set_sensor_mode(self, value): + self._check_camera_open() + self._check_recording_stopped() + try: + if not (0 <= value <= 7): + raise PiCameraValueError( + "Invalid sensor mode: %d (valid range 0..7)" % value) + except TypeError: + raise PiCameraValueError("Invalid sensor mode: %s" % value) + config = self._get_config() + self._disable_camera() + self._configure_camera(config, config._replace(sensor_mode=value)) + self._configure_splitter() + self._enable_camera() + sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\ + Retrieves or sets the input mode of the camera's sensor. + + This is an advanced property which can be used to control the camera's + sensor mode. By default, mode 0 is used which allows the camera to + automatically select an input mode based on the requested + :attr:`resolution` and :attr:`framerate`. Valid values are currently + between 0 and 7. The set of valid sensor modes (along with the + heuristic used to select one automatically) are detailed in the + :ref:`camera_modes` section of the documentation. + + .. note:: + + At the time of writing, setting this property does nothing unless + the camera has been initialized with a sensor mode other than 0. + Furthermore, some mode transitions appear to require setting the + property twice (in a row). This appears to be a firmware + limitation. + + The initial value of this property can be specified with the + *sensor_mode* parameter in the :class:`PiCamera` constructor, and will + default to 0 if not specified. + + .. versionadded:: 1.9 + """) + + def _get_clock_mode(self): + self._check_camera_open() + return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp] + def _set_clock_mode(self, value): + self._check_camera_open() + self._check_recording_stopped() + try: + clock_mode = self.CLOCK_MODES[value] + except KeyError: + raise PiCameraValueError("Invalid clock mode %s" % value) + config = self._get_config() + self._disable_camera() + self._configure_camera(config, config._replace(clock_mode=clock_mode)) + self._configure_splitter() + self._enable_camera() + clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\ + Retrieves or sets the mode of the camera's clock. + + This is an advanced property which can be used to control the nature of + the frame timestamps available from the :attr:`frame` property. When + this is "reset" (the default) each frame's timestamp will be relative + to the start of the recording. When this is "raw", each frame's + timestamp will be relative to the last initialization of the camera. + + The initial value of this property can be specified with the + *clock_mode* parameter in the :class:`PiCamera` constructor, and will + default to "reset" if not specified. + + .. versionadded:: 1.11 + """) + + def _get_isp_blocks(self): + self._check_camera_open() + # XXX Older firmware support? 
+        value = self._camera.control.params[
+            mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE]
+        return {
+            v for k, v in self._ISP_BLOCKS_R.items()
+            if value == 0 or k & value
+            }
+    def _set_isp_blocks(self, value):
+        self._check_camera_open()
+        self._check_recording_stopped()
+        value = set(value)
+        invalid = value - set(self.ISP_BLOCKS.keys())
+        if invalid:
+            raise PiCameraValueError("Invalid ISP block %s" % invalid.pop())
+        isp_blocks = reduce(and_, (~v for k, v in self.ISP_BLOCKS.items()
+                                   if k not in value), 0xFFFFFFFF)
+        config = self._get_config()
+        self._disable_camera()
+        self._configure_camera(config, config._replace(isp_blocks=isp_blocks))
+        self._configure_splitter()
+        self._enable_camera()
+    isp_blocks = property(_get_isp_blocks, _set_isp_blocks, doc="""\
+        Retrieves or sets which ISP blocks are enabled for processing.
+
+        This is an advanced property which can be used to disable various
+        processes within the camera's firmware. The value is a set of strings
+        indicating which blocks are currently active. Valid strings that can
+        be included are:
+
+        {values}
+
+        It is strongly recommended that modifications to this property are
+        done by union or difference rather than straight assignment. Control
+        for further ISP blocks may be added in future and this will ensure
+        your code does not inadvertently disable blocks it does not intend to.
+        For instance, to disable the block responsible for AWB gains::
+
+            camera.isp_blocks -= {{'white-balance'}}
+
+        Then to re-enable the same block::
+
+            camera.isp_blocks |= {{'white-balance'}}
+
+        The camera must not be closed, and no recording must be active when the
+        property is set.
+
+        .. versionadded:: 1.14
+        """.format(values=docstring_values(ISP_BLOCKS)))
+
+    def _get_colorspace(self):
+        return self._COLORSPACES_R[self._camera.outputs[0].colorspace]
+    def _set_colorspace(self, value):
+        self._check_camera_open()
+        self._check_recording_stopped()
+        try:
+            colorspace = self.COLORSPACES[value]
+        except KeyError:
+            raise PiCameraValueError("Invalid colorspace %s" % value)
+        config = self._get_config()
+        self._disable_camera()
+        self._configure_camera(config, config._replace(colorspace=colorspace))
+        self._configure_splitter()
+        self._enable_camera()
+    colorspace = property(_get_colorspace, _set_colorspace, doc="""\
+        Retrieves or sets the `color space`_ that the camera uses for
+        conversion between the `YUV`_ and RGB systems.
+
+        The value is a string that represents which of a series of fixed
+        conversion tables are used by the camera firmware (the firmware works
+        largely in the YUV color system internally). The following strings are
+        the valid values:
+
+        {values}
+
+        The "bt601" and "bt709" values correspond to the standard `SDTV and
+        HDTV tables`_. The "auto" value is the default and corresponds to
+        "bt601" in practice. One of these values is likely what you want when
+        recording H.264 video. However, when recording MJPEG video, you may
+        want to use the "jfif" table instead as it produces luma values in the
+        0-255 range, rather than the 16-235 range produced by the standard
+        tables.
+
+        The camera must not be closed, and no recording must be active when the
+        property is set.
+
+        .. _color space: https://en.wikipedia.org/wiki/Color_space
+        .. _YUV: https://en.wikipedia.org/wiki/YUV
+        .. 
_SDTV and HDTV tables: https://en.wikipedia.org/wiki/YUV#Conversion_to/from_RGB + """.format(values=docstring_values(COLORSPACES))) + + def _get_resolution(self): + self._check_camera_open() + return mo.PiResolution( + int(self._camera_config.max_stills_w), + int(self._camera_config.max_stills_h) + ) + def _set_resolution(self, value): + self._check_camera_open() + self._check_recording_stopped() + value = mo.to_resolution(value) + if not ( + (0 < value.width <= self.MAX_RESOLUTION.width) and + (0 < value.height <= self.MAX_RESOLUTION.height)): + raise PiCameraValueError( + "Invalid resolution requested: %r" % (value,)) + config = self._get_config() + self._disable_camera() + self._configure_camera(config, config._replace(resolution=value)) + self._configure_splitter() + self._enable_camera() + resolution = property(_get_resolution, _set_resolution, doc=""" + Retrieves or sets the resolution at which image captures, video + recordings, and previews will be captured. + + When queried, the :attr:`resolution` property returns the resolution at + which the camera will operate as a tuple of ``(width, height)`` + measured in pixels. This is the resolution that the :meth:`capture` + method will produce images at, and the resolution that + :meth:`start_recording` will produce videos at. + + When set, the property configures the camera so that the next call to + these methods will use the new resolution. The resolution can be + specified as a ``(width, height)`` tuple, as a string formatted + ``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized + `display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For + example, the following definitions are all equivalent:: + + camera.resolution = (1280, 720) + camera.resolution = '1280x720' + camera.resolution = '1280 x 720' + camera.resolution = 'HD' + camera.resolution = '720p' + + The camera must not be closed, and no recording must be active when the + property is set. + + .. note:: + + This attribute, in combination with :attr:`framerate`, determines + the mode that the camera operates in. The actual sensor framerate + and resolution used by the camera is influenced, but not directly + set, by this property. See :attr:`sensor_mode` for more + information. + + The initial value of this property can be specified with the + *resolution* parameter in the :class:`PiCamera` constructor, and will + default to the display's resolution or 1280x720 if the display has + been disabled (with ``tvservice -o``). + + .. versionchanged:: 1.11 + Resolution permitted to be set as a string. Preview resolution + added as separate property. + + .. 
_display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution + """) + + def _get_framerate_range(self): + self._check_camera_open() + port_num = ( + self.CAMERA_VIDEO_PORT + if self._encoders else + self.CAMERA_PREVIEW_PORT + ) + mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE] + return mo.PiFramerateRange(mp.fps_low, mp.fps_high) + def _set_framerate_range(self, value): + self._check_camera_open() + self._check_recording_stopped() + low, high = value + low = mo.to_fraction(low, den_limit=256) + high = mo.to_fraction(high, den_limit=256) + if not (0 < low <= self.MAX_FRAMERATE): + raise PiCameraValueError("Invalid low framerate: %.2ffps" % low) + if not (0 < high <= self.MAX_FRAMERATE): + raise PiCameraValueError("Invalid high framerate: %.2ffps" % high) + if high < low: + raise PiCameraValueError("framerate_range is backwards") + config = self._get_config() + self._disable_camera() + self._configure_camera(config, config._replace(framerate=(low, high))) + self._configure_splitter() + self._enable_camera() + framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\ + Retrieves or sets a range between which the camera's framerate is + allowed to float. + + When queried, the :attr:`framerate_range` property returns a + :func:`~collections.namedtuple` derivative with ``low`` and ``high`` + components (index 0 and 1 respectively) which specify the limits of the + permitted framerate range. + + When set, the property configures the camera so that the next call to + recording and previewing methods will use the new framerate range. + Setting this property will implicitly set the :attr:`framerate` + property to 0 (indicating that a dynamic range of framerates is in use + by the camera). + + .. note:: + + Use of this property prevents use of :attr:`framerate_delta` (there + would be little point in making fractional adjustments to the + framerate when the framerate itself is variable). + + The low and high framerates can be specified as :ref:`int + `, :ref:`float `, or + :class:`~fractions.Fraction` values. For example, the following + definitions are all equivalent:: + + from fractions import Fraction + + camera.framerate_range = (0.16666, 30) + camera.framerate_range = (Fraction(1, 6), 30 / 1) + camera.framerate_range = (Fraction(1, 6), Fraction(30, 1)) + + The camera must not be closed, and no recording must be active when the + property is set. + + .. note:: + + This attribute, like :attr:`framerate`, determines the mode that + the camera operates in. The actual sensor framerate and resolution + used by the camera is influenced, but not directly set, by this + property. See :attr:`sensor_mode` for more information. + + .. 
versionadded:: 1.13 + """) + + def _get_framerate_delta(self): + self._check_camera_open() + if self.framerate == 0: + raise PiCameraValueError( + 'framerate_delta cannot be used with framerate_range') + port_num = ( + self.CAMERA_VIDEO_PORT + if self._encoders else + self.CAMERA_PREVIEW_PORT + ) + return self._camera.outputs[port_num].params[ + mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate + def _set_framerate_delta(self, value): + self._check_camera_open() + if self.framerate == 0: + raise PiCameraValueError( + 'framerate_delta cannot be used with framerate_range') + value = mo.to_fraction(self.framerate + value, den_limit=256) + self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[ + mmal.MMAL_PARAMETER_FRAME_RATE] = value + self._camera.outputs[self.CAMERA_VIDEO_PORT].params[ + mmal.MMAL_PARAMETER_FRAME_RATE] = value + framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\ + Retrieves or sets a fractional amount that is added to the camera's + framerate for the purpose of minor framerate adjustments. + + When queried, the :attr:`framerate_delta` property returns the amount + that the camera's :attr:`framerate` has been adjusted. This defaults + to 0 (so the camera's framerate is the actual framerate used). + + When set, the property adjusts the camera's framerate on the fly. The + property can be set while recordings or previews are in progress. Thus + the framerate used by the camera is actually :attr:`framerate` + + :attr:`framerate_delta`. + + .. note:: + + Framerates deltas can be fractional with adjustments as small as + 1/256th of an fps possible (finer adjustments will be rounded). + With an appropriately tuned PID controller, this can be used to + achieve synchronization between the camera framerate and other + devices. + + If the new framerate demands a mode switch (such as moving between a + low framerate and a high framerate mode), currently active recordings + may drop a frame. This should only happen when specifying quite large + deltas, or when framerate is at the boundary of a sensor mode (e.g. + 49fps). + + The framerate delta can be specified as an :ref:`int `, + :ref:`float `, :class:`~fractions.Fraction` or a + ``(numerator, denominator)`` tuple. For example, the following + definitions are all equivalent:: + + from fractions import Fraction + + camera.framerate_delta = 0.5 + camera.framerate_delta = 1 / 2 # in python 3 + camera.framerate_delta = Fraction(1, 2) + camera.framerate_delta = (1, 2) # deprecated + + .. note:: + + This property is implicitly reset to 0 when :attr:`framerate` or + :attr:`framerate_range` is set. When :attr:`framerate` is 0 + (indicating that :attr:`framerate_range` is set), this property + cannot be used. (there would be little point in making fractional + adjustments to the framerate when the framerate itself is + variable). + + .. versionadded:: 1.11 + """) + + def _get_still_stats(self): + self._check_camera_open() + return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] + def _set_still_stats(self, value): + self._check_camera_open() + self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value + still_stats = property(_get_still_stats, _set_still_stats, doc="""\ + Retrieves or sets whether statistics will be calculated from still + frames or the prior preview frame. 
+
+        When queried, the :attr:`still_stats` property returns a boolean value
+        indicating when scene statistics will be calculated for still captures
+        (that is, captures where the *use_video_port* parameter of
+        :meth:`capture` is ``False``). When this property is ``False`` (the
+        default), statistics will be calculated from the preceding preview
+        frame (this also applies when the preview is not visible). When
+        ``True``, statistics will be calculated from the captured image
+        itself.
+
+        When set, the property controls when scene statistics will be
+        calculated for still captures. The property can be set while
+        recordings or previews are in progress. The default value is
+        ``False``.
+
+        The advantage of calculating scene statistics from the captured image
+        is that the time between startup and capture is reduced, as only the
+        AGC (automatic gain control) has to converge. The downside is that
+        processing time for captures increases and that white balance and gain
+        won't necessarily match the preview.
+
+        .. warning::
+
+            Enabling the still statistics pass will `override fixed white
+            balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
+
+        .. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
+
+        .. versionadded:: 1.9
+        """)
+
+    def _get_saturation(self):
+        self._check_camera_open()
+        return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
+    def _set_saturation(self, value):
+        self._check_camera_open()
+        if not (-100 <= value <= 100):
+            raise PiCameraValueError(
+                "Invalid saturation value: %d (valid range -100..100)" % value)
+        self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
+    saturation = property(_get_saturation, _set_saturation, doc="""\
+        Retrieves or sets the saturation setting of the camera.
+
+        When queried, the :attr:`saturation` property returns the color
+        saturation of the camera as an integer between -100 and 100. When set,
+        the property adjusts the saturation of the camera. Saturation can be
+        adjusted while previews or recordings are in progress. The default
+        value is 0.
+        """)
+
+    def _get_sharpness(self):
+        self._check_camera_open()
+        return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
+    def _set_sharpness(self, value):
+        self._check_camera_open()
+        if not (-100 <= value <= 100):
+            raise PiCameraValueError(
+                "Invalid sharpness value: %d (valid range -100..100)" % value)
+        self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
+    sharpness = property(_get_sharpness, _set_sharpness, doc="""\
+        Retrieves or sets the sharpness setting of the camera.
+
+        When queried, the :attr:`sharpness` property returns the sharpness
+        level of the camera (a measure of the amount of post-processing to
+        reduce or increase image sharpness) as an integer between -100 and 100.
+        When set, the property adjusts the sharpness of the camera. Sharpness
+        can be adjusted while previews or recordings are in progress. The
+        default value is 0.
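+
+        As a rough sketch (assuming a camera module is attached; the chosen
+        levels and delay are purely illustrative), the effect can be compared
+        visually by sweeping the setting while a preview is running::
+
+            import time
+            import picamera
+
+            with picamera.PiCamera() as camera:
+                camera.start_preview()
+                # Step through several sharpness levels, pausing on each
+                for level in (-100, -50, 0, 50, 100):
+                    camera.sharpness = level
+                    time.sleep(2)
+                camera.stop_preview()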
+ """) + + def _get_contrast(self): + self._check_camera_open() + return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100) + def _set_contrast(self, value): + self._check_camera_open() + if not (-100 <= value <= 100): + raise PiCameraValueError( + "Invalid contrast value: %d (valid range -100..100)" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100) + contrast = property(_get_contrast, _set_contrast, doc="""\ + Retrieves or sets the contrast setting of the camera. + + When queried, the :attr:`contrast` property returns the contrast level + of the camera as an integer between -100 and 100. When set, the + property adjusts the contrast of the camera. Contrast can be adjusted + while previews or recordings are in progress. The default value is 0. + """) + + def _get_brightness(self): + self._check_camera_open() + return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100) + def _set_brightness(self, value): + self._check_camera_open() + if not (0 <= value <= 100): + raise PiCameraValueError( + "Invalid brightness value: %d (valid range 0..100)" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100) + brightness = property(_get_brightness, _set_brightness, doc="""\ + Retrieves or sets the brightness setting of the camera. + + When queried, the :attr:`brightness` property returns the brightness + level of the camera as an integer between 0 and 100. When set, the + property adjusts the brightness of the camera. Brightness can be + adjusted while previews or recordings are in progress. The default + value is 50. + """) + + def _get_shutter_speed(self): + self._check_camera_open() + return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED]) + def _set_shutter_speed(self, value): + self._check_camera_open() + self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value + shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\ + Retrieves or sets the shutter speed of the camera in microseconds. + + When queried, the :attr:`shutter_speed` property returns the shutter + speed of the camera in microseconds, or 0 which indicates that the + speed will be automatically determined by the auto-exposure algorithm. + Faster shutter times naturally require greater amounts of illumination + and vice versa. + + When set, the property adjusts the shutter speed of the camera, which + most obviously affects the illumination of subsequently captured + images. Shutter speed can be adjusted while previews or recordings are + running. The default value is 0 (auto). + + .. note:: + + You can query the :attr:`exposure_speed` attribute to determine the + actual shutter speed being used when this attribute is set to 0. + Please note that this capability requires an up to date firmware + (#692 or later). + + .. note:: + + In later firmwares, this attribute is limited by the value of the + :attr:`framerate` attribute. For example, if framerate is set to + 30fps, the shutter speed cannot be slower than 33,333µs (1/fps). + """) + + def _get_exposure_speed(self): + self._check_camera_open() + return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure + exposure_speed = property(_get_exposure_speed, doc="""\ + Retrieves the current shutter speed of the camera. + + When queried, this property returns the shutter speed currently being + used by the camera. 
If you have set :attr:`shutter_speed` to a non-zero + value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be + equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you + can read the actual shutter speed being used from this attribute. The + value is returned as an integer representing a number of microseconds. + This is a read-only property. + + .. versionadded:: 1.6 + """) + + def _get_analog_gain(self): + self._check_camera_open() + return mo.to_fraction( + self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain) + analog_gain = property(_get_analog_gain, doc="""\ + Retrieves the current analog gain of the camera. + + When queried, this property returns the analog gain currently being + used by the camera. The value represents the analog gain of the sensor + prior to digital conversion. The value is returned as a + :class:`~fractions.Fraction` instance. + + .. versionadded:: 1.6 + """) + + def _get_digital_gain(self): + self._check_camera_open() + return mo.to_fraction( + self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain) + digital_gain = property(_get_digital_gain, doc="""\ + Retrieves the current digital gain of the camera. + + When queried, this property returns the digital gain currently being + used by the camera. The value represents the digital gain the camera + applies after conversion of the sensor's analog output. The value is + returned as a :class:`~fractions.Fraction` instance. + + .. versionadded:: 1.6 + """) + + def _get_video_denoise(self): + self._check_camera_open() + return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] + def _set_video_denoise(self, value): + self._check_camera_open() + self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value + video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\ + Retrieves or sets whether denoise will be applied to video recordings. + + When queried, the :attr:`video_denoise` property returns a boolean + value indicating whether or not the camera software will apply a + denoise algorithm to video recordings. + + When set, the property activates or deactivates the denoise algorithm + for video recordings. The property can be set while recordings or + previews are in progress. The default value is ``True``. + + .. versionadded:: 1.7 + """) + + def _get_image_denoise(self): + self._check_camera_open() + return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] + def _set_image_denoise(self, value): + self._check_camera_open() + self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value + image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\ + Retrieves or sets whether denoise will be applied to image captures. + + When queried, the :attr:`image_denoise` property returns a boolean + value indicating whether or not the camera software will apply a + denoise algorithm to image captures. + + When set, the property activates or deactivates the denoise algorithm + for image captures. The property can be set while recordings or + previews are in progress. The default value is ``True``. + + .. 
versionadded:: 1.7
+        """)
+
+    def _get_drc_strength(self):
+        self._check_camera_open()
+        return self._DRC_STRENGTHS_R[
+            self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
+            ]
+    def _set_drc_strength(self, value):
+        self._check_camera_open()
+        try:
+            mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
+            mp.strength = self.DRC_STRENGTHS[value]
+        except KeyError:
+            raise PiCameraValueError(
+                "Invalid dynamic range compression strength: %s" % value)
+        self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
+    drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
+        Retrieves or sets the dynamic range compression strength of the camera.
+
+        When queried, the :attr:`drc_strength` property returns a string
+        indicating the amount of `dynamic range compression`_ the camera
+        applies to images.
+
+        When set, the attribute adjusts the strength of the dynamic range
+        compression applied to the camera's output. Valid values are given
+        in the list below:
+
+        {values}
+
+        The default value is ``'off'``. All possible values for the attribute
+        can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
+
+        .. warning::
+
+            Enabling DRC will `override fixed white balance`_ gains (set via
+            :attr:`awb_gains` and :attr:`awb_mode`).
+
+        .. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
+        .. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
+
+        .. versionadded:: 1.6
+        """.format(values=docstring_values(DRC_STRENGTHS)))
+
+    def _get_ISO(self):
+        warnings.warn(
+            PiCameraDeprecated(
+                'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
+        return self.iso
+    def _set_ISO(self, value):
+        warnings.warn(
+            PiCameraDeprecated(
+                'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
+        self.iso = value
+    ISO = property(_get_ISO, _set_ISO, doc="""
+        Retrieves or sets the apparent ISO setting of the camera.
+
+        .. deprecated:: 1.8
+            Please use the :attr:`iso` attribute instead.
+        """)
+
+    def _get_iso(self):
+        self._check_camera_open()
+        return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
+    def _set_iso(self, value):
+        self._check_camera_open()
+        try:
+            if not (0 <= value <= 1600):
+                raise PiCameraValueError(
+                    "Invalid iso value: %d (valid range 0..1600)" % value)
+        except TypeError:
+            raise PiCameraValueError("Invalid iso value: %s" % value)
+        self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
+    iso = property(_get_iso, _set_iso, doc="""\
+        Retrieves or sets the apparent ISO setting of the camera.
+
+        When queried, the :attr:`iso` property returns the ISO setting of the
+        camera, a value which represents the `sensitivity of the camera to
+        light`_. Lower values (e.g. 100) imply less sensitivity than higher
+        values (e.g. 400 or 800). Lower sensitivities tend to produce less
+        "noisy" (smoother) images, but operate poorly in low light conditions.
+
+        When set, the property adjusts the sensitivity of the camera (by
+        adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
+        values are between 0 (auto) and 1600. The actual value used when iso is
+        explicitly set will be one of the following values (whichever is
+        closest): 100, 200, 320, 400, 500, 640, 800.
+
+        ..
note:: + + Some users on the Pi camera forum have noted that higher ISO values + than 800 (specifically up to 1600) can be achieved in certain + conditions with :attr:`exposure_mode` set to ``'sports'`` and + :attr:`iso` set to 0. It doesn't appear to be possible to manually + request an ISO setting higher than 800, but the picamera library + will permit settings up to 1600 in case the underlying firmware + permits such settings in particular circumstances. + + On the V1 camera module, non-zero ISO values attempt to fix overall + gain at various levels. For example, ISO 100 attempts to provide an + overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0, + etc. The algorithm prefers analog gain over digital gain to reduce + noise. + + On the V2 camera module, ISO 100 attempts to produce overall gain of + ~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2 + camera module was calibrated against the `ISO film speed`_ standard). + + The attribute can be adjusted while previews or recordings are in + progress. The default value is 0 which means automatically determine a + value according to image-taking conditions. + + .. note:: + + Certain :attr:`exposure_mode` values override the ISO setting. For + example, ``'off'`` fixes :attr:`analog_gain` and + :attr:`digital_gain` entirely, preventing this property from + adjusting them when set. + + .. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital + .. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO + """) + + def _get_meter_mode(self): + self._check_camera_open() + return self._METER_MODES_R[ + self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value + ] + def _set_meter_mode(self, value): + self._check_camera_open() + try: + mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] + mp.value = self.METER_MODES[value] + except KeyError: + raise PiCameraValueError("Invalid metering mode: %s" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp + meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\ + Retrieves or sets the metering mode of the camera. + + When queried, the :attr:`meter_mode` property returns the method by + which the camera `determines the exposure`_ as one of the following + strings: + + {values} + + When set, the property adjusts the camera's metering mode. All modes + set up two regions: a center region, and an outer region. The major + `difference between each mode`_ is the size of the center region. The + ``'backlit'`` mode has the largest central region (30% of the width), + while ``'spot'`` has the smallest (10% of the width). + + The property can be set while recordings or previews are in progress. + The default value is ``'average'``. All possible values for the + attribute can be obtained from the ``PiCamera.METER_MODES`` attribute. + + .. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode + .. 
_difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644 + """.format(values=docstring_values(METER_MODES))) + + def _get_video_stabilization(self): + self._check_camera_open() + return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] + def _set_video_stabilization(self, value): + self._check_camera_open() + self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value + video_stabilization = property( + _get_video_stabilization, _set_video_stabilization, doc="""\ + Retrieves or sets the video stabilization mode of the camera. + + When queried, the :attr:`video_stabilization` property returns a + boolean value indicating whether or not the camera attempts to + compensate for motion. + + When set, the property activates or deactivates video stabilization. + The property can be set while recordings or previews are in progress. + The default value is ``False``. + + .. note:: + + The built-in video stabilization only accounts for `vertical and + horizontal motion`_, not rotation. + + .. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667 + """) + + def _get_exposure_compensation(self): + self._check_camera_open() + return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] + def _set_exposure_compensation(self, value): + self._check_camera_open() + try: + if not (-25 <= value <= 25): + raise PiCameraValueError( + "Invalid exposure compensation value: " + "%d (valid range -25..25)" % value) + except TypeError: + raise PiCameraValueError( + "Invalid exposure compensation value: %s" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value + exposure_compensation = property( + _get_exposure_compensation, _set_exposure_compensation, doc="""\ + Retrieves or sets the exposure compensation level of the camera. + + When queried, the :attr:`exposure_compensation` property returns an + integer value between -25 and 25 indicating the exposure level of the + camera. Larger values result in brighter images. + + When set, the property adjusts the camera's exposure compensation + level. Each increment represents 1/6th of a stop. Hence setting the + attribute to 6 increases exposure by 1 stop. The property can be set + while recordings or previews are in progress. The default value is 0. + """) + + def _get_exposure_mode(self): + self._check_camera_open() + return self._EXPOSURE_MODES_R[ + self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value + ] + def _set_exposure_mode(self, value): + self._check_camera_open() + try: + mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] + mp.value = self.EXPOSURE_MODES[value] + except KeyError: + raise PiCameraValueError("Invalid exposure mode: %s" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp + exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\ + Retrieves or sets the exposure mode of the camera. + + When queried, the :attr:`exposure_mode` property returns a string + representing the exposure setting of the camera. The possible values + can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and + are as follows: + + {values} + + When set, the property adjusts the camera's exposure mode. The + property can be set while recordings or previews are in progress. The + default value is ``'auto'``. + + .. 
note:: + + Exposure mode ``'off'`` is special: this disables the camera's + automatic gain control, fixing the values of :attr:`digital_gain` + and :attr:`analog_gain`. + + Please note that these properties are not directly settable + (although they can be influenced by setting :attr:`iso` *prior* to + fixing the gains), and default to low values when the camera is + first initialized. Therefore it is important to let them settle on + higher values before disabling automatic gain control otherwise all + frames captured will appear black. + """.format(values=docstring_values(EXPOSURE_MODES))) + + def _get_flash_mode(self): + self._check_camera_open() + return self._FLASH_MODES_R[ + self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value + ] + def _set_flash_mode(self, value): + self._check_camera_open() + try: + mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] + mp.value = self.FLASH_MODES[value] + except KeyError: + raise PiCameraValueError("Invalid flash mode: %s" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp + flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\ + Retrieves or sets the flash mode of the camera. + + When queried, the :attr:`flash_mode` property returns a string + representing the flash setting of the camera. The possible values can + be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as + follows: + + {values} + + When set, the property adjusts the camera's flash mode. The property + can be set while recordings or previews are in progress. The default + value is ``'off'``. + + .. note:: + + You must define which GPIO pins the camera is to use for flash and + privacy indicators. This is done within the `Device Tree + configuration`_ which is considered an advanced topic. + Specifically, you need to define pins ``FLASH_0_ENABLE`` and + optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More + information can be found in this :ref:`recipe + `. + + .. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md + + .. versionadded:: 1.10 + """.format(values=docstring_values(FLASH_MODES))) + + def _get_awb_mode(self): + self._check_camera_open() + return self._AWB_MODES_R[ + self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value + ] + def _set_awb_mode(self, value): + self._check_camera_open() + try: + mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] + mp.value = self.AWB_MODES[value] + except KeyError: + raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp + awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\ + Retrieves or sets the auto-white-balance mode of the camera. + + When queried, the :attr:`awb_mode` property returns a string + representing the auto white balance setting of the camera. The possible + values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and + are as follows: + + {values} + + When set, the property adjusts the camera's auto-white-balance mode. + The property can be set while recordings or previews are in progress. + The default value is ``'auto'``. + + .. note:: + + AWB mode ``'off'`` is special: this disables the camera's automatic + white balance permitting manual control of the white balance via + the :attr:`awb_gains` property. However, even with AWB disabled, + some attributes (specifically :attr:`still_stats` and + :attr:`drc_strength`) can cause AWB re-calculations. 
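+
+        For example, a common approach (a sketch assuming an attached camera;
+        the settling delay and filename are arbitrary) is to let the automatic
+        algorithm converge and then freeze its result so that a series of
+        captures uses consistent white balance::
+
+            import time
+            import picamera
+
+            with picamera.PiCamera() as camera:
+                camera.awb_mode = 'auto'
+                time.sleep(2)  # give the AWB algorithm time to settle
+                gains = camera.awb_gains
+                camera.awb_mode = 'off'
+                camera.awb_gains = gains
+                camera.capture('fixed_awb.jpg')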
+ """.format(values=docstring_values(AWB_MODES))) + + def _get_awb_gains(self): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS] + return ( + mo.to_fraction(mp.awb_red_gain), + mo.to_fraction(mp.awb_blue_gain), + ) + def _set_awb_gains(self, value): + self._check_camera_open() + try: + red_gain, blue_gain = value + except (ValueError, TypeError): + red_gain = blue_gain = value + if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0): + raise PiCameraValueError( + "Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % ( + red_gain, blue_gain)) + mp = mmal.MMAL_PARAMETER_AWB_GAINS_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS, + ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T) + ), + mo.to_rational(red_gain), + mo.to_rational(blue_gain), + ) + self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp + awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\ + Gets or sets the auto-white-balance gains of the camera. + + When queried, this attribute returns a tuple of values representing + the `(red, blue)` balance of the camera. The `red` and `blue` values + are returned :class:`~fractions.Fraction` instances. The values will + be between 0.0 and 8.0. + + When set, this attribute adjusts the camera's auto-white-balance gains. + The property can be specified as a single value in which case both red + and blue gains will be adjusted equally, or as a `(red, blue)` tuple. + Values can be specified as an :ref:`int `, :ref:`float + ` or :class:`~fractions.Fraction` and each gain must be + between 0.0 and 8.0. Typical values for the gains are between 0.9 and + 1.9. The property can be set while recordings or previews are in + progress. + + .. note:: + + This attribute only has an effect when :attr:`awb_mode` is set to + ``'off'``. Also note that even with AWB disabled, some attributes + (specifically :attr:`still_stats` and :attr:`drc_strength`) can + cause AWB re-calculations. + + .. versionchanged:: 1.6 + Prior to version 1.6, this attribute was write-only. + """) + + def _get_image_effect(self): + self._check_camera_open() + return self._IMAGE_EFFECTS_R[ + self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value + ] + def _set_image_effect(self, value): + self._check_camera_open() + try: + mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] + mp.value = self.IMAGE_EFFECTS[value] + self._image_effect_params = None + except KeyError: + raise PiCameraValueError("Invalid image effect: %s" % value) + self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp + image_effect = property(_get_image_effect, _set_image_effect, doc="""\ + Retrieves or sets the current image effect applied by the camera. + + When queried, the :attr:`image_effect` property returns a string + representing the effect the camera will apply to captured video. The + possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS`` + attribute, and are as follows: + + {values} + + When set, the property changes the effect applied by the camera. The + property can be set while recordings or previews are in progress, but + only certain effects work while recording video (notably ``'negative'`` + and ``'solarize'``). The default value is ``'none'``. 
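+
+        As a brief sketch (assuming an attached camera; the effects and delay
+        chosen here are arbitrary examples), effects can be previewed in turn
+        like so::
+
+            import time
+            import picamera
+
+            with picamera.PiCamera() as camera:
+                camera.start_preview()
+                for effect in ('negative', 'solarize', 'sketch', 'none'):
+                    camera.image_effect = effect
+                    camera.annotate_text = effect  # label the current effect
+                    time.sleep(3)
+                camera.stop_preview()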
+ """.format(values=docstring_values(IMAGE_EFFECTS))) + + def _get_image_effect_params(self): + self._check_camera_open() + return self._image_effect_params + def _set_image_effect_params(self, value): + self._check_camera_open() + to_int = lambda x: int(x) + to_byte = lambda x: max(0, min(255, int(x))) + to_bool = lambda x: (0, 1)[bool(x)] + to_8dot8 = lambda x: int(x * 256) + valid_transforms = { + 'solarize': [ + (to_bool, to_byte, to_byte, to_byte, to_byte), + (to_byte, to_byte, to_byte, to_byte), + (to_bool,), + ], + 'colorpoint': [ + (lambda x: max(0, min(3, int(x))),), + ], + 'colorbalance': [ + (to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int), + (to_8dot8, to_8dot8, to_8dot8, to_8dot8), + (to_8dot8, to_8dot8, to_8dot8), + ], + 'colorswap': [ + (to_bool,), + ], + 'posterise': [ + (lambda x: max(2, min(31, int(x))),), + ], + 'blur': [ + (lambda x: max(1, min(2, int(x))),), + ], + 'film': [ + (to_byte, to_byte, to_byte), + ], + 'watercolor': [ + (), + (to_byte, to_byte), + ] + } + # Ensure params is a tuple + try: + params = tuple(i for i in value) + except TypeError: + params = (value,) + # Find the parameter combination for the current effect + effect = self.image_effect + param_transforms = [ + transforms for transforms in valid_transforms.get(effect, []) + if len(transforms) == len(params) + ] + if not param_transforms: + raise PiCameraValueError( + 'invalid set of parameters for effect "%s"' % effect) + param_transforms = param_transforms[0] + params = tuple( + transform(p) + for (transform, p) in zip(param_transforms, params) + ) + mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS, + ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T) + ), + effect=self.IMAGE_EFFECTS[effect], + num_effect_params=len(params), + effect_parameter=params, + ) + self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp + self._image_effect_params = value + image_effect_params = property( + _get_image_effect_params, _set_image_effect_params, doc="""\ + Retrieves or sets the parameters for the current :attr:`effect + `. + + When queried, the :attr:`image_effect_params` property either returns + ``None`` (for effects which have no configurable parameters, or if no + parameters have been configured), or a tuple of numeric values up to + six elements long. + + When set, the property changes the parameters of the current + :attr:`effect ` as a sequence of numbers, or a single + number. Attempting to set parameters on an effect which does not + support parameters, or providing an incompatible set of parameters for + an effect will raise a :exc:`PiCameraValueError` exception. + + The effects which have parameters, and what combinations those + parameters can take is as follows: + + .. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}| + + +--------------------+----------------+-----------------------------------------+ + | Effect | Parameters | Description | + +====================+================+=========================================+ + | ``'solarize'`` | *yuv*, | *yuv* controls whether data is | + | | *x0*, *y1*, | processed as RGB (0) or YUV(1). Input | + | | *y2*, *y3* | values from 0 to *x0* - 1 are remapped | + | | | linearly onto the range 0 to *y0*. | + | | | Values from *x0* to 255 are remapped | + | | | linearly onto the range *y1* to *y2*. 
|
+        |                    +----------------+-----------------------------------------+
+        |                    | *x0*, *y0*,    | Same as above, but *yuv* defaults to    |
+        |                    | *y1*, *y2*     | 0 (process as RGB).                     |
+        |                    +----------------+-----------------------------------------+
+        |                    | *yuv*          | Same as above, but *x0*, *y0*, *y1*,    |
+        |                    |                | *y2* default to 128, 128, 128, 0        |
+        |                    |                | respectively.                           |
+        +--------------------+----------------+-----------------------------------------+
+        | ``'colorpoint'``   | *quadrant*     | *quadrant* specifies which quadrant     |
+        |                    |                | of the U/V space to retain chroma       |
+        |                    |                | from: 0=green, 1=red/yellow, 2=blue,    |
+        |                    |                | 3=purple. There is no default; this     |
+        |                    |                | effect does nothing until parameters    |
+        |                    |                | are set.                                |
+        +--------------------+----------------+-----------------------------------------+
+        | ``'colorbalance'`` | *lens*,        | *lens* specifies the lens shading       |
+        |                    | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0       |
+        |                    | *u*, *v*       | indicates lens shading has no effect).  |
+        |                    |                | *r*, *g*, *b* are multipliers for their |
+        |                    |                | respective color channels (0.0 to       |
+        |                    |                | 256.0). *u* and *v* are offsets added   |
+        |                    |                | to the U/V plane (0 to 255).            |
+        |                    +----------------+-----------------------------------------+
+        |                    | *lens*,        | Same as above but *u* and *v* default   |
+        |                    | *r*, *g*, *b*  | to 0.                                   |
+        |                    +----------------+-----------------------------------------+
+        |                    | *lens*,        | Same as above but *g* also defaults to  |
+        |                    | *r*, *b*       | 1.0.                                    |
+        +--------------------+----------------+-----------------------------------------+
+        | ``'colorswap'``    | *dir*          | If *dir* is 0, swap RGB to BGR. If      |
+        |                    |                | *dir* is 1, swap RGB to BRG.            |
+        +--------------------+----------------+-----------------------------------------+
+        | ``'posterise'``    | *steps*        | Control the quantization steps for the  |
+        |                    |                | image. Valid values are 2 to 32, and    |
+        |                    |                | the default is 4.                       |
+        +--------------------+----------------+-----------------------------------------+
+        | ``'blur'``         | *size*         | Specifies the size of the kernel. Valid |
+        |                    |                | values are 1 or 2.                      |
+        +--------------------+----------------+-----------------------------------------+
+        | ``'film'``         | *strength*,    | *strength* specifies the strength of    |
+        |                    | *u*, *v*       | effect. *u* and *v* are offsets added   |
+        |                    |                | to the U/V plane (0 to 255).            |
+        +--------------------+----------------+-----------------------------------------+
+        | ``'watercolor'``   | *u*, *v*       | *u* and *v* specify offsets to add to   |
+        |                    |                | the U/V plane (0 to 255).               |
+        |                    +----------------+-----------------------------------------+
+        |                    |                | No parameters indicates no U/V effect.  |
+        +--------------------+----------------+-----------------------------------------+
+
+        ..
versionadded:: 1.8 + """) + + def _get_color_effects(self): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] + if mp.enable != mmal.MMAL_FALSE: + return (mp.u, mp.v) + else: + return None + def _set_color_effects(self, value): + self._check_camera_open() + if value is None: + enable = mmal.MMAL_FALSE + u = v = 128 + else: + enable = mmal.MMAL_TRUE + try: + u, v = value + except (TypeError, ValueError) as e: + raise PiCameraValueError( + "Invalid color effect (u, v) tuple: %s" % value) + if not ((0 <= u <= 255) and (0 <= v <= 255)): + raise PiCameraValueError( + "(u, v) values must be between 0 and 255") + mp = mmal.MMAL_PARAMETER_COLOURFX_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_COLOUR_EFFECT, + ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T) + ), + enable, u, v + ) + self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp + color_effects = property(_get_color_effects, _set_color_effects, doc="""\ + Retrieves or sets the current color effect applied by the camera. + + When queried, the :attr:`color_effects` property either returns + ``None`` which indicates that the camera is using normal color + settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer + values between 0 and 255. + + When set, the property changes the color effect applied by the camera. + The property can be set while recordings or previews are in progress. + For example, to make the image black and white set the value to ``(128, + 128)``. The default value is ``None``. + """) + + def _get_rotation(self): + self._check_camera_open() + return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION] + def _set_rotation(self, value): + self._check_camera_open() + try: + value = ((int(value) % 360) // 90) * 90 + except ValueError: + raise PiCameraValueError("Invalid rotation angle: %s" % value) + for port in self._camera.outputs: + port.params[mmal.MMAL_PARAMETER_ROTATION] = value + rotation = property(_get_rotation, _set_rotation, doc="""\ + Retrieves or sets the current rotation of the camera's image. + + When queried, the :attr:`rotation` property returns the rotation + applied to the image. Valid values are 0, 90, 180, and 270. + + When set, the property changes the rotation applied to the camera's + input. The property can be set while recordings or previews are in + progress. The default value is ``0``. + """) + + def _get_vflip(self): + self._check_camera_open() + return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in ( + mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH) + def _set_vflip(self, value): + self._check_camera_open() + value = { + (False, False): mmal.MMAL_PARAM_MIRROR_NONE, + (True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL, + (False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL, + (True, True): mmal.MMAL_PARAM_MIRROR_BOTH, + }[(bool(value), self.hflip)] + for port in self._camera.outputs: + port.params[mmal.MMAL_PARAMETER_MIRROR] = value + vflip = property(_get_vflip, _set_vflip, doc="""\ + Retrieves or sets whether the camera's output is vertically flipped. + + When queried, the :attr:`vflip` property returns a boolean indicating + whether or not the camera's output is vertically flipped. The property + can be set while recordings or previews are in progress. The default + value is ``False``. 
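+
+        For instance (a minimal sketch; the output filename is arbitrary), a
+        camera module that is mounted upside-down is commonly corrected by
+        flipping both axes::
+
+            import picamera
+
+            with picamera.PiCamera() as camera:
+                camera.vflip = True
+                camera.hflip = True
+                camera.capture('righted.jpg')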
+ """) + + def _get_hflip(self): + self._check_camera_open() + return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in ( + mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH) + def _set_hflip(self, value): + self._check_camera_open() + value = { + (False, False): mmal.MMAL_PARAM_MIRROR_NONE, + (True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL, + (False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL, + (True, True): mmal.MMAL_PARAM_MIRROR_BOTH, + }[(self.vflip, bool(value))] + for port in self._camera.outputs: + port.params[mmal.MMAL_PARAMETER_MIRROR] = value + hflip = property(_get_hflip, _set_hflip, doc="""\ + Retrieves or sets whether the camera's output is horizontally flipped. + + When queried, the :attr:`hflip` property returns a boolean indicating + whether or not the camera's output is horizontally flipped. The + property can be set while recordings or previews are in progress. The + default value is ``False``. + """) + + def _get_zoom(self): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] + return ( + mp.rect.x / 65535.0, + mp.rect.y / 65535.0, + mp.rect.width / 65535.0, + mp.rect.height / 65535.0, + ) + def _set_zoom(self, value): + self._check_camera_open() + try: + x, y, w, h = value + except (TypeError, ValueError) as e: + raise PiCameraValueError( + "Invalid zoom rectangle (x, y, w, h) tuple: %s" % value) + mp = mmal.MMAL_PARAMETER_INPUT_CROP_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_INPUT_CROP, + ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T) + ), + mmal.MMAL_RECT_T( + max(0, min(65535, int(65535 * x))), + max(0, min(65535, int(65535 * y))), + max(0, min(65535, int(65535 * w))), + max(0, min(65535, int(65535 * h))), + ), + ) + self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp + zoom = property(_get_zoom, _set_zoom, doc="""\ + Retrieves or sets the zoom applied to the camera's input. + + When queried, the :attr:`zoom` property returns a ``(x, y, w, h)`` + tuple of floating point values ranging from 0.0 to 1.0, indicating the + proportion of the image to include in the output (this is also known as + the "Region of Interest" or ROI). The default value is ``(0.0, 0.0, + 1.0, 1.0)`` which indicates that everything should be included. The + property can be set while recordings or previews are in progress. + + The `zoom` is applied to the processed image, after rotation and rescale. + If rotation has been used, zoom is composed of ``(y, x, h, w)`` instead. + The values `w` and `h` can modify the aspect ratio of the image: use equal + values for `w` and `h` if you want to keep the same the aspect ratio. + """) + + def _get_crop(self): + warnings.warn( + PiCameraDeprecated( + 'PiCamera.crop is deprecated; use PiCamera.zoom instead')) + return self.zoom + def _set_crop(self, value): + warnings.warn( + PiCameraDeprecated( + 'PiCamera.crop is deprecated; use PiCamera.zoom instead')) + self.zoom = value + crop = property(_get_crop, _set_crop, doc=""" + Retrieves or sets the zoom applied to the camera's input. + + .. deprecated:: 1.8 + Please use the :attr:`zoom` attribute instead. + """) + + def _get_overlays(self): + self._check_camera_open() + return self._overlays + overlays = property(_get_overlays, doc="""\ + Retrieves all active :class:`PiRenderer` overlays. + + If no overlays are current active, :attr:`overlays` will return an + empty iterable. Otherwise, it will return an iterable of + :class:`PiRenderer` instances which are currently acting as overlays. 
+ Note that the preview renderer is an exception to this: it is *not* + included as an overlay despite being derived from :class:`PiRenderer`. + + .. versionadded:: 1.8 + """) + + def _get_preview(self): + self._check_camera_open() + if isinstance(self._preview, PiPreviewRenderer): + return self._preview + preview = property(_get_preview, doc="""\ + Retrieves the :class:`PiRenderer` displaying the camera preview. + + If no preview is currently active, :attr:`preview` will return + ``None``. Otherwise, it will return the instance of + :class:`PiRenderer` which is currently connected to the camera's + preview port for rendering what the camera sees. You can use the + attributes of the :class:`PiRenderer` class to configure the appearance + of the preview. For example, to make the preview semi-transparent:: + + import picamera + + with picamera.PiCamera() as camera: + camera.start_preview() + camera.preview.alpha = 128 + + .. versionadded:: 1.8 + """) + + def _get_preview_alpha(self): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_alpha is deprecated; use ' + 'PiCamera.preview.alpha instead')) + if self.preview: + return self.preview.alpha + else: + return self._preview_alpha + def _set_preview_alpha(self, value): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_alpha is deprecated; use ' + 'PiCamera.preview.alpha instead')) + if self.preview: + self.preview.alpha = value + else: + self._preview_alpha = value + preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\ + Retrieves or sets the opacity of the preview window. + + .. deprecated:: 1.8 + Please use the :attr:`~PiRenderer.alpha` attribute of the + :attr:`preview` object instead. + """) + + def _get_preview_layer(self): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_layer is deprecated; ' + 'use PiCamera.preview.layer instead')) + if self.preview: + return self.preview.layer + else: + return self._preview_layer + def _set_preview_layer(self, value): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_layer is deprecated; ' + 'use PiCamera.preview.layer instead')) + if self.preview: + self.preview.layer = value + else: + self._preview_layer = value + preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\ + Retrieves or sets the layer of the preview window. + + .. deprecated:: 1.8 + Please use the :attr:`~PiRenderer.layer` attribute of the + :attr:`preview` object instead. + """) + + def _get_preview_fullscreen(self): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_fullscreen is deprecated; ' + 'use PiCamera.preview.fullscreen instead')) + if self.preview: + return self.preview.fullscreen + else: + return self._preview_fullscreen + def _set_preview_fullscreen(self, value): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_fullscreen is deprecated; ' + 'use PiCamera.preview.fullscreen instead')) + if self.preview: + self.preview.fullscreen = value + else: + self._preview_fullscreen = value + preview_fullscreen = property( + _get_preview_fullscreen, _set_preview_fullscreen, doc="""\ + Retrieves or sets full-screen for the preview window. + + .. deprecated:: 1.8 + Please use the :attr:`~PiRenderer.fullscreen` attribute of the + :attr:`preview` object instead. 
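+
+        As a short sketch of the replacement usage (assuming a preview has
+        been started; the window coordinates are arbitrary)::
+
+            import picamera
+
+            with picamera.PiCamera() as camera:
+                camera.start_preview()
+                camera.preview.fullscreen = False
+                camera.preview.window = (0, 0, 640, 480)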
+ """) + + def _get_preview_window(self): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_window is deprecated; ' + 'use PiCamera.preview.window instead')) + if self.preview: + return self.preview.window + else: + return self._preview_window + def _set_preview_window(self, value): + self._check_camera_open() + warnings.warn( + PiCameraDeprecated( + 'PiCamera.preview_window is deprecated; ' + 'use PiCamera.preview.window instead')) + if self.preview: + self.preview.window = value + else: + self._preview_window = value + preview_window = property( + _get_preview_window, _set_preview_window, doc="""\ + Retrieves or sets the size of the preview window. + + .. deprecated:: 1.8 + Please use the :attr:`~PiRenderer.window` attribute of the + :attr:`preview` object instead. + """) + + def _get_annotate_text(self): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + if mp.enable: + return mp.text.decode('ascii') + else: + return '' + def _set_annotate_text(self, value): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + mp.enable = bool(value or mp.show_frame_num) + if mp.enable: + try: + mp.text = value.encode('ascii') + except ValueError as e: + raise PiCameraValueError(str(e)) + self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp + annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\ + Retrieves or sets a text annotation for all output. + + When queried, the :attr:`annotate_text` property returns the current + annotation (if no annotation has been set, this is simply a blank + string). + + When set, the property immediately applies the annotation to the + preview (if it is running) and to any future captures or video + recording. Strings longer than 255 characters, or strings containing + non-ASCII characters will raise a :exc:`PiCameraValueError`. The + default value is ``''``. + + .. versionchanged:: 1.8 + Text annotations can now be 255 characters long. The prior limit + was 32 characters. + """) + + def _get_annotate_frame_num(self): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + return mp.show_frame_num.value != mmal.MMAL_FALSE + def _set_annotate_frame_num(self, value): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + mp.enable = bool(value or mp.text) + mp.show_frame_num = bool(value) + self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp + annotate_frame_num = property( + _get_annotate_frame_num, _set_annotate_frame_num, doc="""\ + Controls whether the current frame number is drawn as an annotation. + + The :attr:`annotate_frame_num` attribute is a bool indicating whether + or not the current frame number is rendered as an annotation, similar + to :attr:`annotate_text`. The default is ``False``. + + .. 
versionadded:: 1.8 + """) + + def _get_annotate_text_size(self): + self._check_camera_open() + if self._camera.annotate_rev == 3: + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + return mp.text_size or self.DEFAULT_ANNOTATE_SIZE + else: + return self.DEFAULT_ANNOTATE_SIZE + def _set_annotate_text_size(self, value): + self._check_camera_open() + if not (6 <= value <= 160): + raise PiCameraValueError( + "Invalid annotation text size: %d (valid range 6-160)" % value) + if self._camera.annotate_rev == 3: + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + mp.text_size = value + self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp + elif value != self.DEFAULT_ANNOTATE_SIZE: + warnings.warn( + PiCameraFallback( + "Firmware does not support setting annotation text " + "size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE)) + annotate_text_size = property( + _get_annotate_text_size, _set_annotate_text_size, doc="""\ + Controls the size of the annotation text. + + The :attr:`annotate_text_size` attribute is an int which determines how + large the annotation text will appear on the display. Valid values are + in the range 6 to 160, inclusive. The default is {size}. + + .. versionadded:: 1.10 + """.format(size=DEFAULT_ANNOTATE_SIZE)) + + def _get_annotate_foreground(self): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + if self._camera.annotate_rev == 3 and mp.custom_text_color: + return Color.from_yuv_bytes( + mp.custom_text_Y, + mp.custom_text_U, + mp.custom_text_V) + else: + return Color('white') + def _set_annotate_foreground(self, value): + self._check_camera_open() + if not isinstance(value, Color): + raise PiCameraValueError( + 'annotate_foreground must be a Color') + elif self._camera.annotate_rev < 3: + if value.rgb_bytes != (255, 255, 255): + warnings.warn( + PiCameraFallback( + "Firmware does not support setting a custom foreground " + "annotation color; using white instead")) + return + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + mp.custom_text_color = True + ( + mp.custom_text_Y, + mp.custom_text_U, + mp.custom_text_V, + ) = value.yuv_bytes + self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp + annotate_foreground = property( + _get_annotate_foreground, _set_annotate_foreground, doc="""\ + Controls the color of the annotation text. + + The :attr:`annotate_foreground` attribute specifies, partially, the + color of the annotation text. The value is specified as a + :class:`Color`. The default is white. + + .. note:: + + The underlying firmware does not directly support setting all + components of the text color, only the Y' component of a `Y'UV`_ + tuple. This is roughly (but not precisely) analogous to the + "brightness" of a color, so you may choose to think of this as + setting how bright the annotation text will be relative to its + background. In order to specify just the Y' component when setting + this attribute, you may choose to construct the + :class:`Color` instance as follows:: + + camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0) + + .. _Y'UV: https://en.wikipedia.org/wiki/YUV + + .. 
versionadded:: 1.10 + """) + + def _get_annotate_background(self): + self._check_camera_open() + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + if self._camera.annotate_rev == 3: + if mp.enable_text_background: + if mp.custom_background_color: + return Color.from_yuv_bytes( + mp.custom_background_Y, + mp.custom_background_U, + mp.custom_background_V) + else: + return Color('black') + else: + return None + else: + if mp.black_text_background: + return Color('black') + else: + return None + def _set_annotate_background(self, value): + self._check_camera_open() + if value is True: + warnings.warn( + PiCameraDeprecated( + 'Setting PiCamera.annotate_background to True is ' + 'deprecated; use PiCamera.color.Color("black") instead')) + value = Color('black') + elif value is False: + warnings.warn( + PiCameraDeprecated( + 'Setting PiCamera.annotate_background to False is ' + 'deprecated; use None instead')) + value = None + elif value is None: + pass + elif not isinstance(value, Color): + raise PiCameraValueError( + 'annotate_background must be a Color or None') + elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0): + warnings.warn( + PiCameraFallback( + "Firmware does not support setting a custom background " + "annotation color; using black instead")) + mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + if self._camera.annotate_rev == 3: + if value is None: + mp.enable_text_background = False + else: + mp.enable_text_background = True + mp.custom_background_color = True + ( + mp.custom_background_Y, + mp.custom_background_U, + mp.custom_background_V, + ) = value.yuv_bytes + else: + if value is None: + mp.black_text_background = False + else: + mp.black_text_background = True + self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp + annotate_background = property( + _get_annotate_background, _set_annotate_background, doc="""\ + Controls what background is drawn behind the annotation. + + The :attr:`annotate_background` attribute specifies if a background + will be drawn behind the :attr:`annotation text ` and, + if so, what color it will be. The value is specified as a + :class:`Color` or ``None`` if no background should be drawn. The + default is ``None``. + + .. note:: + + For backward compatibility purposes, the value ``False`` will be + treated as ``None``, and the value ``True`` will be treated as the + color black. The "truthiness" of the values returned by the + attribute are backward compatible although the values themselves + are not. + + .. versionadded:: 1.8 + + .. versionchanged:: 1.10 + In prior versions this was a bool value with ``True`` representing + a black background. + """) + diff --git a/picamera/color.py b/picamera/color.py new file mode 100644 index 0000000..9c46444 --- /dev/null +++ b/picamera/color.py @@ -0,0 +1,50 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import colorzero as c0 + +from .exc import PiCameraDeprecated + + +NAMED_COLORS = c0.tables.NAMED_COLORS +Red = c0.Red +Green = c0.Green +Blue = c0.Blue +Hue = c0.Hue +Lightness = c0.Lightness +Saturation = c0.Saturation + + +class Color(c0.Color): + def __new__(cls, *args, **kwargs): + warnings.warn( + PiCameraDeprecated( + 'The picamera.color module and Color class are deprecated; ' + 'please use the colorzero library (same API) instead')) + return c0.Color.__new__(cls, *args, **kwargs) diff --git a/picamera/display.py b/picamera/display.py new file mode 100644 index 0000000..6e09d1b --- /dev/null +++ b/picamera/display.py @@ -0,0 +1,320 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + +import mimetypes +import ctypes as ct +from functools import reduce +from operator import or_ + +from . 
import bcm_host, mmalobj as mo, mmal +from .encoders import PiCookedOneImageEncoder, PiRawOneImageEncoder +from .exc import PiCameraRuntimeError, PiCameraValueError + + +class PiDisplay(object): + __slots__ = ( + '_display', + '_info', + '_transform', + '_exif_tags', + ) + + _ROTATIONS = { + bcm_host.DISPMANX_NO_ROTATE: 0, + bcm_host.DISPMANX_ROTATE_90: 90, + bcm_host.DISPMANX_ROTATE_180: 180, + bcm_host.DISPMANX_ROTATE_270: 270, + } + _ROTATIONS_R = {v: k for k, v in _ROTATIONS.items()} + _ROTATIONS_MASK = reduce(or_, _ROTATIONS.keys(), 0) + + RAW_FORMATS = { + 'yuv', + 'rgb', + 'rgba', + 'bgr', + 'bgra', + } + + def __init__(self, display_num=0): + bcm_host.bcm_host_init() + self._exif_tags = {} + self._display = bcm_host.vc_dispmanx_display_open(display_num) + self._transform = bcm_host.DISPMANX_NO_ROTATE + if not self._display: + raise PiCameraRuntimeError('unable to open display %d' % display_num) + self._info = bcm_host.DISPMANX_MODEINFO_T() + if bcm_host.vc_dispmanx_display_get_info(self._display, self._info): + raise PiCameraRuntimeError('unable to get display info') + + def close(self): + bcm_host.vc_dispmanx_display_close(self._display) + self._display = None + + @property + def closed(self): + return self._display is None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + def _get_output_format(self, output): + """ + Given an output object, attempt to determine the requested format. + + We attempt to determine the filename of the *output* object and derive + a MIME type from the extension. If *output* has no filename, an error + is raised. + """ + if isinstance(output, bytes): + filename = output.decode('utf-8') + elif isinstance(output, str): + filename = output + else: + try: + filename = output.name + except AttributeError: + raise PiCameraValueError( + 'Format must be specified when output has no filename') + (type, encoding) = mimetypes.guess_type(filename, strict=False) + if not type: + raise PiCameraValueError( + 'Unable to determine type from filename %s' % filename) + return type + + def _get_image_format(self, output, format=None): + """ + Given an output object and an optional format, attempt to determine the + requested image format. + + This method is used by all capture methods to determine the requested + output format. If *format* is specified as a MIME-type the "image/" + prefix is stripped. If *format* is not specified, then + :meth:`_get_output_format` will be called to attempt to determine + format from the *output* object. + """ + if isinstance(format, bytes): + format = format.decode('utf-8') + format = format or self._get_output_format(output) + format = ( + format[6:] if format.startswith('image/') else + format) + if format == 'x-ms-bmp': + format = 'bmp' + return format + + def _get_image_encoder(self, output_port, format, resize, **options): + """ + Construct an image encoder for the requested parameters. + + This method is called by :meth:`capture`. The *output_port* parameter + gives the MMAL port that the encoder should read output from. The + *format* parameter indicates the image format and will be one of: + + * ``'jpeg'`` + * ``'png'`` + * ``'gif'`` + * ``'bmp'`` + * ``'yuv'`` + * ``'rgb'`` + * ``'rgba'`` + * ``'bgr'`` + * ``'bgra'`` + + The *resize* parameter indicates the size that the encoder should + resize the output to (presumably by including a resizer in the + pipeline). Finally, *options* includes extra keyword arguments that + should be passed verbatim to the encoder. 
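+
+        As an illustration (a sketch only; the filename is arbitrary), this
+        helper is ultimately invoked by :meth:`capture`, which derives the
+        format from the output's name::
+
+            import picamera
+
+            # Grab a screenshot of the current display contents as a PNG
+            with picamera.PiDisplay() as display:
+                display.capture('screenshot.png')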
+ """ + encoder_class = ( + PiRawOneImageEncoder if format in self.RAW_FORMATS else + PiCookedOneImageEncoder) + return encoder_class( + self, None, output_port, format, resize, **options) + + def capture(self, output, format=None, resize=None, **options): + format = self._get_image_format(output, format) + if format == 'yuv': + raise PiCameraValueError('YUV format is unsupported at this time') + res = self.resolution + if (self._info.transform & bcm_host.DISPMANX_ROTATE_90) or ( + self._info.transform & bcm_host.DISPMANX_ROTATE_270): + res = res.transpose() + transform = self._transform + if (transform & bcm_host.DISPMANX_ROTATE_90) or ( + transform & bcm_host.DISPMANX_ROTATE_270): + res = res.transpose() + source = mo.MMALPythonSource() + source.outputs[0].format = mmal.MMAL_ENCODING_RGB24 + if format == 'bgr': + source.outputs[0].format = mmal.MMAL_ENCODING_BGR24 + transform |= bcm_host.DISPMANX_SNAPSHOT_SWAP_RED_BLUE + source.outputs[0].framesize = res + source.outputs[0].commit() + encoder = self._get_image_encoder( + source.outputs[0], format, resize, **options) + try: + encoder.start(output) + try: + pitch = res.pad(width=16).width * 3 + image_ptr = ct.c_uint32() + resource = bcm_host.vc_dispmanx_resource_create( + bcm_host.VC_IMAGE_RGB888, res.width, res.height, image_ptr) + if not resource: + raise PiCameraRuntimeError( + 'unable to allocate resource for capture') + try: + buf = source.outputs[0].get_buffer() + if bcm_host.vc_dispmanx_snapshot(self._display, resource, transform): + raise PiCameraRuntimeError('failed to capture snapshot') + rect = bcm_host.VC_RECT_T(0, 0, res.width, res.height) + if bcm_host.vc_dispmanx_resource_read_data(resource, rect, buf._buf[0].data, pitch): + raise PiCameraRuntimeError('failed to read snapshot') + buf._buf[0].length = pitch * res.height + buf._buf[0].flags = ( + mmal.MMAL_BUFFER_HEADER_FLAG_EOS | + mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END + ) + finally: + bcm_host.vc_dispmanx_resource_delete(resource) + source.outputs[0].send_buffer(buf) + # XXX Anything more intelligent than a 10 second default? + encoder.wait(10) + finally: + encoder.stop() + finally: + encoder.close() + + def _calculate_transform(self): + """ + Calculates a reverse transform to undo any that the boot configuration + applies (presumably the user has altered the boot configuration to + match their screen orientation so they want any capture to appear + correctly oriented by default). This is then modified by the transforms + specified in the :attr:`rotation`, :attr:`hflip` and :attr:`vflip` + attributes. + """ + r = PiDisplay._ROTATIONS[self._info.transform & PiDisplay._ROTATIONS_MASK] + r = (360 - r) % 360 # undo the native rotation + r = (r + self.rotation) % 360 # add selected rotation + result = PiDisplay._ROTATIONS_R[r] + result |= self._info.transform & ( # undo flips by re-doing them + bcm_host.DISPMANX_FLIP_HRIZ | bcm_host.DISPMANX_FLIP_VERT + ) + return result + + @property + def resolution(self): + """ + Retrieves the resolution of the display device. + """ + return mo.PiResolution(width=self._info.width, height=self._info.height) + + def _get_hflip(self): + return bool(self._info.transform & bcm_host.DISPMANX_FLIP_HRIZ) + def _set_hflip(self, value): + if value: + self._info.transform |= bcm_host.DISPMANX_FLIP_HRIZ + else: + self._info.transform &= ~bcm_host.DISPMANX_FLIP_HRIZ + hflip = property(_get_hflip, _set_hflip, doc="""\ + Retrieves or sets whether snapshots are horizontally flipped. 
+ + When queried, the :attr:`vflip` property returns a boolean indicating + whether or not the output of :meth:`capture` is horizontally flipped. + The default is ``False``. + + .. note:: + + This property only affects snapshots; it does not affect the + display output itself. + """) + + def _get_vflip(self): + return bool(self._info.transform & bcm_host.DISPMANX_FLIP_VERT) + def _set_vflip(self, value): + if value: + self._info.transform |= bcm_host.DISPMANX_FLIP_VERT + else: + self._info.transform &= ~bcm_host.DISPMANX_FLIP_VERT + vflip = property(_get_vflip, _set_vflip, doc="""\ + Retrieves or sets whether snapshots are vertically flipped. + + When queried, the :attr:`vflip` property returns a boolean indicating + whether or not the output of :meth:`capture` is vertically flipped. The + default is ``False``. + + .. note:: + + This property only affects snapshots; it does not affect the + display output itself. + """) + + def _get_rotation(self): + return PiDisplay._ROTATIONS[self._transform & PiDisplay._ROTATIONS_MASK] + def _set_rotation(self, value): + try: + self._transform = ( + self._transform & ~PiDisplay._ROTATIONS_MASK) | PiDisplay._ROTATIONS_R[value] + except KeyError: + raise PiCameraValueError('invalid rotation %d' % value) + rotation = property(_get_rotation, _set_rotation, doc="""\ + Retrieves or sets the rotation of snapshots. + + When queried, the :attr:`rotation` property returns the rotation + applied to the result of :meth:`capture`. Valid values are 0, 90, 180, + and 270. When set, the property changes the rotation applied to the + result of :meth:`capture`. The default is 0. + + .. note:: + + This property only affects snapshots; it does not affect the + display itself. To rotate the display itself, modify the + ``display_rotate`` value in :file:`/boot/config.txt`. + """) + + def _get_exif_tags(self): + return self._exif_tags + def _set_exif_tags(self, value): + self._exif_tags = {k: v for k, v in value.items()} + exif_tags = property(_get_exif_tags, _set_exif_tags) + diff --git a/picamera/encoders.py b/picamera/encoders.py new file mode 100644 index 0000000..d8f0434 --- /dev/null +++ b/picamera/encoders.py @@ -0,0 +1,1218 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str and range equivalent to Py3's +str = type('') + +import datetime +import threading +import warnings +import ctypes as ct + +from . import bcm_host, mmal, mmalobj as mo +from .frames import PiVideoFrame, PiVideoFrameType +from .exc import ( + PiCameraMMALError, + PiCameraValueError, + PiCameraIOError, + PiCameraRuntimeError, + PiCameraResizerEncoding, + PiCameraAlphaStripping, + PiCameraResolutionRounded, + ) + + +class PiEncoder(object): + """ + Base implementation of an MMAL encoder for use by PiCamera. + + The *parent* parameter specifies the :class:`PiCamera` instance that has + constructed the encoder. The *camera_port* parameter provides the MMAL + camera port that the encoder should enable for capture (this will be the + still or video port of the camera component). The *input_port* parameter + specifies the MMAL port that the encoder should connect to its input. + Sometimes this will be the same as the camera port, but if other components + are present in the pipeline (e.g. a splitter), it may be different. + + The *format* parameter specifies the format that the encoder should + produce in its output. This is specified as a string and will be one of + the following for image encoders: + + * ``'jpeg'`` + * ``'png'`` + * ``'gif'`` + * ``'bmp'`` + * ``'yuv'`` + * ``'rgb'`` + * ``'rgba'`` + * ``'bgr'`` + * ``'bgra'`` + + And one of the following for video encoders: + + * ``'h264'`` + * ``'mjpeg'`` + + The *resize* parameter is either ``None`` (indicating no resizing + should take place), or a ``(width, height)`` tuple specifying the + resolution that the output of the encoder should be resized to. + + Finally, the *options* parameter specifies additional keyword arguments + that can be used to configure the encoder (e.g. bitrate for videos, or + quality for images). + + .. attribute:: camera_port + + The :class:`~mmalobj.MMALVideoPort` that needs to be activated and + deactivated in order to start/stop capture. This is not necessarily the + port that the encoder component's input port is connected to (for + example, in the case of video-port based captures, this will be the + camera video port behind the splitter). + + .. attribute:: encoder + + The :class:`~mmalobj.MMALComponent` representing the encoder, or + ``None`` if no encoder component has been created (some encoder classes + don't use an actual encoder component, for example + :class:`PiRawImageMixin`). + + .. attribute:: event + + A :class:`threading.Event` instance used to synchronize operations + (like start, stop, and split) between the control thread and the + callback thread. + + .. attribute:: exception + + If an exception occurs during the encoder callback, this attribute is + used to store the exception until it can be re-raised in the control + thread. + + .. attribute:: format + + The image or video format that the encoder is expected to produce. 
This + is equal to the value of the *format* parameter. + + .. attribute:: input_port + + The :class:`~mmalobj.MMALVideoPort` that the encoder should be + connected to. + + .. attribute:: output_port + + The :class:`~mmalobj.MMALVideoPort` that produces the encoder's output. + In the case no encoder component is created, this should be the + camera/component output port responsible for producing data. In other + words, this attribute **must** be set on initialization. + + .. attribute:: outputs + + A mapping of ``key`` to ``(output, opened)`` tuples where ``output`` + is a file-like object, and ``opened`` is a bool indicating whether or + not we opened the output object (and thus whether we are responsible + for eventually closing it). + + .. attribute:: outputs_lock + + A :func:`threading.Lock` instance used to protect access to + :attr:`outputs`. + + .. attribute:: parent + + The :class:`PiCamera` instance that created this PiEncoder instance. + + .. attribute:: pool + + A pointer to a pool of MMAL buffers. + + .. attribute:: resizer + + The :class:`~mmalobj.MMALResizer` component, or ``None`` if no resizer + component has been created. + """ + + DEBUG = 0 + encoder_type = None + + def __init__( + self, parent, camera_port, input_port, format, resize, **options): + self.parent = parent + self.encoder = None + self.resizer = None + self.camera_port = camera_port + self.input_port = input_port + self.output_port = None + self.outputs_lock = threading.Lock() # protects access to self.outputs + self.outputs = {} + self.exception = None + self.event = threading.Event() + try: + if parent and parent.closed: + raise PiCameraRuntimeError("Camera is closed") + if resize: + self._create_resizer(*mo.to_resolution(resize)) + self._create_encoder(format, **options) + if self.encoder: + self.encoder.connection.enable() + if self.resizer: + self.resizer.connection.enable() + except: + self.close() + raise + + def _create_resizer(self, width, height): + """ + Creates and configures an :class:`~mmalobj.MMALResizer` component. + + This is called when the initializer's *resize* parameter is something + other than ``None``. The *width* and *height* parameters are passed to + the constructed resizer. Note that this method only constructs the + resizer - it does not connect it to the encoder. The method sets the + :attr:`resizer` attribute to the constructed resizer component. + """ + self.resizer = mo.MMALResizer() + self.resizer.inputs[0].connect(self.input_port) + self.resizer.outputs[0].copy_from(self.resizer.inputs[0]) + self.resizer.outputs[0].format = mmal.MMAL_ENCODING_I420 + self.resizer.outputs[0].framesize = (width, height) + self.resizer.outputs[0].commit() + + def _create_encoder(self, format): + """ + Creates and configures the :class:`~mmalobj.MMALEncoder` component. + + This method only constructs the encoder; it does not connect it to the + input port. The method sets the :attr:`encoder` attribute to the + constructed encoder component, and the :attr:`output_port` attribute to + the encoder's output port (or the previously constructed resizer's + output port if one has been requested). Descendent classes extend this + method to finalize encoder configuration. + + .. note:: + + It should be noted that this method is called with the + initializer's ``option`` keyword arguments. This base + implementation expects no additional arguments, but descendent + classes extend the parameter list to include options relevant to + them. 
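+
+        For illustration only (``MyImageEncoder`` is hypothetical; compare
+        :meth:`PiImageEncoder._create_encoder` later in this module), a
+        descendent typically extends this method along the following lines::
+
+            def _create_encoder(self, format, quality=85):
+                super(MyImageEncoder, self)._create_encoder(format)
+                self.output_port.format = mmal.MMAL_ENCODING_JPEG
+                self.output_port.commit()
+                self.encoder.enable()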
+ """ + assert not self.encoder + self.encoder = self.encoder_type() + self.output_port = self.encoder.outputs[0] + if self.resizer: + self.encoder.inputs[0].connect(self.resizer.outputs[0]) + else: + self.encoder.inputs[0].connect(self.input_port) + self.encoder.outputs[0].copy_from(self.encoder.inputs[0]) + # NOTE: We deliberately don't commit the output port format here as + # this is a base class and the output configuration is incomplete at + # this point. Descendents are expected to finish configuring the + # encoder and then commit the port format themselves + + def _callback(self, port, buf): + """ + The encoder's main callback function. + + When the encoder is active, this method is periodically called in a + background thread. The *port* parameter specifies the :class:`MMALPort` + providing the output (typically this is the encoder's output port, but + in the case of unencoded captures may simply be a camera port), while + the *buf* parameter is an :class:`~mmalobj.MMALBuffer` which can be + used to obtain the data to write, along with meta-data about the + current frame. + + This method must set :attr:`event` when the encoder has finished (and + should set :attr:`exception` if an exception occurred during encoding). + + Developers wishing to write a custom encoder class may find it simpler + to override the :meth:`_callback_write` method, rather than deal with + these complexities. + """ + if self.DEBUG > 1: + print(repr(buf)) + try: + stop = self._callback_write(buf) + except Exception as e: + stop = True + self.exception = e + if stop: + self.event.set() + return stop + + def _callback_write(self, buf, key=PiVideoFrameType.frame): + """ + _callback_write(buf, key=PiVideoFrameType.frame) + + Writes output on behalf of the encoder callback function. + + This method is called by :meth:`_callback` to handle writing to an + object in :attr:`outputs` identified by *key*. The *buf* parameter is + an :class:`~mmalobj.MMALBuffer` which can be used to obtain the data. + The method is expected to return a boolean to indicate whether output + is complete (``True``) or whether more data is expected (``False``). + + The default implementation simply writes the contents of the buffer to + the output identified by *key*, and returns ``True`` if the buffer + flags indicate end of stream. Image encoders will typically override + the return value to indicate ``True`` on end of frame (as they only + wish to output a single image). Video encoders will typically override + this method to determine where key-frames and SPS headers occur. + """ + if buf.length: + with self.outputs_lock: + try: + output = self.outputs[key][0] + written = output.write(buf.data) + except KeyError: + # No output associated with the key type; discard the + # data + pass + else: + # Ignore None return value; most Python 2 streams have + # no return value for write() + if (written is not None) and (written != buf.length): + raise PiCameraIOError( + "Failed to write %d bytes from buffer to " + "output %r" % (buf.length, output)) + return bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS) + + def _open_output(self, output, key=PiVideoFrameType.frame): + """ + _open_output(output, key=PiVideoFrameType.frame) + + Opens *output* and associates it with *key* in :attr:`outputs`. + + If *output* is a string, this method opens it as a filename and keeps + track of the fact that the encoder was the one to open it (which + implies that :meth:`_close_output` should eventually close it). 
+ Otherwise, if *output* has a ``write`` method it is assumed to be a + file-like object and it is used verbatim. If *output* is neither a + string, nor an object with a ``write`` method it is assumed to be a + writeable object supporting the buffer protocol (this is wrapped in + a :class:`BufferIO` stream to simplify writing). + + The opened output is added to the :attr:`outputs` dictionary with the + specified *key*. + """ + with self.outputs_lock: + self.outputs[key] = mo.open_stream(output) + + def _close_output(self, key=PiVideoFrameType.frame): + """ + _close_output(key=PiVideoFrameType.frame) + + Closes the output associated with *key* in :attr:`outputs`. + + Closes the output object associated with the specified *key*, and + removes it from the :attr:`outputs` dictionary (if we didn't open the + object then we attempt to flush it instead). + """ + with self.outputs_lock: + try: + (output, opened) = self.outputs.pop(key) + except KeyError: + pass + else: + mo.close_stream(output, opened) + + @property + def active(self): + """ + Returns ``True`` if the MMAL encoder exists and is enabled. + """ + try: + return bool(self.output_port.enabled) + except AttributeError: + # output_port can be None; avoid a (demonstrated) race condition + # by catching AttributeError + return False + + def start(self, output): + """ + Starts the encoder object writing to the specified output. + + This method is called by the camera to start the encoder capturing + data from the camera to the specified output. The *output* parameter + is either a filename, or a file-like object (for image and video + encoders), or an iterable of filenames or file-like objects (for + multi-image encoders). + """ + self.event.clear() + self.exception = None + self._open_output(output) + with self.parent._encoders_lock: + self.output_port.enable(self._callback) + if self.DEBUG > 0: + mo.print_pipeline(self.output_port) + self.parent._start_capture(self.camera_port) + + def wait(self, timeout=None): + """ + Waits for the encoder to finish (successfully or otherwise). + + This method is called by the owning camera object to block execution + until the encoder has completed its task. If the *timeout* parameter + is None, the method will block indefinitely. Otherwise, the *timeout* + parameter specifies the (potentially fractional) number of seconds + to block for. If the encoder finishes successfully within the timeout, + the method returns ``True``. Otherwise, it returns ``False``. + """ + result = self.event.wait(timeout) + if result: + self.stop() + # Check whether the callback set an exception + if self.exception: + raise self.exception + return result + + def stop(self): + """ + Stops the encoder, regardless of whether it's finished. + + This method is called by the camera to terminate the execution of the + encoder. Typically, this is used with video to stop the recording, but + can potentially be called in the middle of image capture to terminate + the capture. + """ + # NOTE: The active test below is necessary to prevent attempting to + # re-enter the parent lock in the case the encoder is being torn down + # by an error in the constructor + if self.active: + if self.parent and self.camera_port: + with self.parent._encoders_lock: + self.parent._stop_capture(self.camera_port) + self.output_port.disable() + self.event.set() + self._close_output() + + def close(self): + """ + Finalizes the encoder and deallocates all structures. 
+ + This method is called by the camera prior to destroying the encoder (or + more precisely, letting it go out of scope to permit the garbage + collector to destroy it at some future time). The method destroys all + components that the various create methods constructed and resets their + attributes. + """ + self.stop() + if self.encoder: + self.encoder.disconnect() + if self.resizer: + self.resizer.disconnect() + if self.encoder: + self.encoder.close() + self.encoder = None + if self.resizer: + self.resizer.close() + self.resizer = None + self.output_port = None + + +class MMALBufferAlphaStrip(mo.MMALBuffer): + """ + An MMALBuffer descendent that strips alpha bytes from the buffer data. This + is used internally by PiRawMixin when it needs to strip alpha bytes itself + (e.g. because an appropriate format cannot be selected on an output port). + """ + + def __init__(self, buf): + super(MMALBufferAlphaStrip, self).__init__(buf) + self._stripped = bytearray(super(MMALBufferAlphaStrip, self).data) + del self._stripped[3::4] + + @property + def length(self): + return len(self._stripped) + + @property + def data(self): + return self._stripped + + +class PiRawMixin(PiEncoder): + """ + Mixin class for "raw" (unencoded) output. + + This mixin class overrides the initializer of :class:`PiEncoder`, along + with :meth:`_create_resizer` and :meth:`_create_encoder` to configure the + pipeline for unencoded output. Specifically, it disables the construction + of an encoder, and sets the output port to the input port passed to the + initializer, unless resizing is required (either for actual resizing, or + for format conversion) in which case the resizer's output is used. + """ + + RAW_ENCODINGS = { + # name mmal-encoding bytes-per-pixel + 'yuv': (mmal.MMAL_ENCODING_I420, 1.5), + 'rgb': (mmal.MMAL_ENCODING_RGB24, 3), + 'rgba': (mmal.MMAL_ENCODING_RGBA, 4), + 'bgr': (mmal.MMAL_ENCODING_BGR24, 3), + 'bgra': (mmal.MMAL_ENCODING_BGRA, 4), + } + + def __init__( + self, parent, camera_port, input_port, format, resize, **options): + encoding, bpp = self.RAW_ENCODINGS[format] + # Workaround: on older firmwares, non-YUV encodings aren't supported on + # the still port. 
If a non-YUV format is requested without resizing, + # test whether we can commit the requested format on the input port and + # if this fails, set resize to force resizer usage + if resize is None and encoding != mmal.MMAL_ENCODING_I420: + input_port.format = encoding + try: + input_port.commit() + except PiCameraMMALError as e: + if e.status != mmal.MMAL_EINVAL: + raise + resize = input_port.framesize + warnings.warn( + PiCameraResizerEncoding( + "using a resizer to perform non-YUV encoding; " + "upgrading your firmware with sudo rpi-update " + "may improve performance")) + # Workaround: If a non-alpha format is requested with the resizer, use + # the alpha-inclusive format and set a flag to get the callback to + # strip the alpha bytes + self._strip_alpha = False + if resize: + width, height = resize + try: + format = { + 'rgb': 'rgba', + 'bgr': 'bgra', + }[format] + self._strip_alpha = True + warnings.warn( + PiCameraAlphaStripping( + "using alpha-stripping to convert to non-alpha " + "format; you may find the equivalent alpha format " + "faster")) + except KeyError: + pass + else: + width, height = input_port.framesize + # Workaround (#83): when the resizer is used the width must be aligned + # (both the frame and crop values) to avoid an error when the output + # port format is set (height is aligned too, simply for consistency + # with old picamera versions). Warn the user as they're not going to + # get the resolution they expect + if not resize and format != 'yuv' and input_port.name.startswith('vc.ril.video_splitter'): + # Workaround: Expected frame size is rounded to 16x16 when splitter + # port with no resizer is used and format is not YUV + fwidth = bcm_host.VCOS_ALIGN_UP(width, 16) + else: + fwidth = bcm_host.VCOS_ALIGN_UP(width, 32) + fheight = bcm_host.VCOS_ALIGN_UP(height, 16) + if fwidth != width or fheight != height: + warnings.warn( + PiCameraResolutionRounded( + "frame size rounded up from %dx%d to %dx%d" % ( + width, height, fwidth, fheight))) + if resize: + resize = (fwidth, fheight) + # Workaround: Calculate the expected frame size, to be used by the + # callback to decide when a frame ends. This is to work around a + # firmware bug that causes the raw image to be returned twice when the + # maximum camera resolution is requested + self._frame_size = int(fwidth * fheight * bpp) + super(PiRawMixin, self).__init__( + parent, camera_port, input_port, format, resize, **options) + + def _create_encoder(self, format): + """ + Overridden to skip creating an encoder. Instead, this class simply uses + the resizer's port as the output port (if a resizer has been + configured) or the specified input port otherwise. + """ + if self.resizer: + self.output_port = self.resizer.outputs[0] + else: + self.output_port = self.input_port + try: + self.output_port.format = self.RAW_ENCODINGS[format][0] + except KeyError: + raise PiCameraValueError('unknown format %s' % format) + self.output_port.commit() + + def _callback_write(self, buf, key=PiVideoFrameType.frame): + """ + _callback_write(buf, key=PiVideoFrameType.frame) + + Overridden to strip alpha bytes when required. + """ + if self._strip_alpha: + return super(PiRawMixin, self)._callback_write(MMALBufferAlphaStrip(buf._buf), key) + else: + return super(PiRawMixin, self)._callback_write(buf, key) + + +class PiVideoEncoder(PiEncoder): + """ + Encoder for video recording. + + This derivative of :class:`PiEncoder` configures itself for H.264 or MJPEG + encoding. 
It also introduces a :meth:`split` method which is used by + :meth:`~PiCamera.split_recording` and :meth:`~PiCamera.record_sequence` to + redirect future output to a new filename or object. Finally, it also + extends :meth:`PiEncoder.start` and :meth:`PiEncoder._callback_write` to + track video frame meta-data, and to permit recording motion data to a + separate output object. + """ + + encoder_type = mo.MMALVideoEncoder + + def __init__( + self, parent, camera_port, input_port, format, resize, **options): + super(PiVideoEncoder, self).__init__( + parent, camera_port, input_port, format, resize, **options) + self._next_output = [] + self._split_frame = None + self.frame = None + + def _create_encoder( + self, format, bitrate=17000000, intra_period=None, profile='high', + level='4', quantization=0, quality=0, inline_headers=True, + sei=False, sps_timing=False, motion_output=None, + intra_refresh=None): + """ + Extends the base :meth:`~PiEncoder._create_encoder` implementation to + configure the video encoder for H.264 or MJPEG output. + """ + super(PiVideoEncoder, self)._create_encoder(format) + + # XXX Remove quantization in 2.0 + quality = quality or quantization + + try: + self.output_port.format = { + 'h264': mmal.MMAL_ENCODING_H264, + 'mjpeg': mmal.MMAL_ENCODING_MJPEG, + }[format] + except KeyError: + raise PiCameraValueError('Unsupported format %s' % format) + + if format == 'h264': + try: + profile = { + 'baseline': mmal.MMAL_VIDEO_PROFILE_H264_BASELINE, + 'main': mmal.MMAL_VIDEO_PROFILE_H264_MAIN, + #'extended': mmal.MMAL_VIDEO_PROFILE_H264_EXTENDED, + 'high': mmal.MMAL_VIDEO_PROFILE_H264_HIGH, + 'constrained': mmal.MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE, + }[profile] + except KeyError: + raise PiCameraValueError("Invalid H.264 profile %s" % profile) + try: + level = { + '1': mmal.MMAL_VIDEO_LEVEL_H264_1, + '1.0': mmal.MMAL_VIDEO_LEVEL_H264_1, + '1b': mmal.MMAL_VIDEO_LEVEL_H264_1b, + '1.1': mmal.MMAL_VIDEO_LEVEL_H264_11, + '1.2': mmal.MMAL_VIDEO_LEVEL_H264_12, + '1.3': mmal.MMAL_VIDEO_LEVEL_H264_13, + '2': mmal.MMAL_VIDEO_LEVEL_H264_2, + '2.0': mmal.MMAL_VIDEO_LEVEL_H264_2, + '2.1': mmal.MMAL_VIDEO_LEVEL_H264_21, + '2.2': mmal.MMAL_VIDEO_LEVEL_H264_22, + '3': mmal.MMAL_VIDEO_LEVEL_H264_3, + '3.0': mmal.MMAL_VIDEO_LEVEL_H264_3, + '3.1': mmal.MMAL_VIDEO_LEVEL_H264_31, + '3.2': mmal.MMAL_VIDEO_LEVEL_H264_32, + '4': mmal.MMAL_VIDEO_LEVEL_H264_4, + '4.0': mmal.MMAL_VIDEO_LEVEL_H264_4, + '4.1': mmal.MMAL_VIDEO_LEVEL_H264_41, + '4.2': mmal.MMAL_VIDEO_LEVEL_H264_42, + }[level] + except KeyError: + raise PiCameraValueError("Invalid H.264 level %s" % level) + + # From https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels + bitrate_limit = { + # level, high-profile: bitrate + (mmal.MMAL_VIDEO_LEVEL_H264_1, False): 64000, + (mmal.MMAL_VIDEO_LEVEL_H264_1b, False): 128000, + (mmal.MMAL_VIDEO_LEVEL_H264_11, False): 192000, + (mmal.MMAL_VIDEO_LEVEL_H264_12, False): 384000, + (mmal.MMAL_VIDEO_LEVEL_H264_13, False): 768000, + (mmal.MMAL_VIDEO_LEVEL_H264_2, False): 2000000, + (mmal.MMAL_VIDEO_LEVEL_H264_21, False): 4000000, + (mmal.MMAL_VIDEO_LEVEL_H264_22, False): 4000000, + (mmal.MMAL_VIDEO_LEVEL_H264_3, False): 10000000, + (mmal.MMAL_VIDEO_LEVEL_H264_31, False): 14000000, + (mmal.MMAL_VIDEO_LEVEL_H264_32, False): 20000000, + (mmal.MMAL_VIDEO_LEVEL_H264_4, False): 20000000, + (mmal.MMAL_VIDEO_LEVEL_H264_41, False): 50000000, + (mmal.MMAL_VIDEO_LEVEL_H264_42, False): 50000000, + (mmal.MMAL_VIDEO_LEVEL_H264_1, True): 80000, + (mmal.MMAL_VIDEO_LEVEL_H264_1b, True): 160000, + 
(mmal.MMAL_VIDEO_LEVEL_H264_11, True): 240000, + (mmal.MMAL_VIDEO_LEVEL_H264_12, True): 480000, + (mmal.MMAL_VIDEO_LEVEL_H264_13, True): 960000, + (mmal.MMAL_VIDEO_LEVEL_H264_2, True): 2500000, + (mmal.MMAL_VIDEO_LEVEL_H264_21, True): 5000000, + (mmal.MMAL_VIDEO_LEVEL_H264_22, True): 5000000, + (mmal.MMAL_VIDEO_LEVEL_H264_3, True): 12500000, + (mmal.MMAL_VIDEO_LEVEL_H264_31, True): 17500000, + (mmal.MMAL_VIDEO_LEVEL_H264_32, True): 25000000, + (mmal.MMAL_VIDEO_LEVEL_H264_4, True): 25000000, + (mmal.MMAL_VIDEO_LEVEL_H264_41, True): 62500000, + (mmal.MMAL_VIDEO_LEVEL_H264_42, True): 62500000, + }[level, profile == mmal.MMAL_VIDEO_PROFILE_H264_HIGH] + if bitrate > bitrate_limit: + raise PiCameraValueError( + 'bitrate %d exceeds %d which is the limit for the ' + 'selected H.264 level and profile' % + (bitrate, bitrate_limit)) + self.output_port.bitrate = bitrate + self.output_port.commit() + + # Again, from https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels + macroblocks_per_s_limit, macroblocks_limit = { + #level: macroblocks/s, macroblocks + mmal.MMAL_VIDEO_LEVEL_H264_1: (1485, 99), + mmal.MMAL_VIDEO_LEVEL_H264_1b: (1485, 99), + mmal.MMAL_VIDEO_LEVEL_H264_11: (3000, 396), + mmal.MMAL_VIDEO_LEVEL_H264_12: (6000, 396), + mmal.MMAL_VIDEO_LEVEL_H264_13: (11880, 396), + mmal.MMAL_VIDEO_LEVEL_H264_2: (11880, 396), + mmal.MMAL_VIDEO_LEVEL_H264_21: (19800, 792), + mmal.MMAL_VIDEO_LEVEL_H264_22: (20250, 1620), + mmal.MMAL_VIDEO_LEVEL_H264_3: (40500, 1620), + mmal.MMAL_VIDEO_LEVEL_H264_31: (108000, 3600), + mmal.MMAL_VIDEO_LEVEL_H264_32: (216000, 5120), + mmal.MMAL_VIDEO_LEVEL_H264_4: (245760, 8192), + mmal.MMAL_VIDEO_LEVEL_H264_41: (245760, 8192), + mmal.MMAL_VIDEO_LEVEL_H264_42: (522240, 8704), + }[level] + w, h = self.output_port.framesize + w = bcm_host.VCOS_ALIGN_UP(w, 16) >> 4 + h = bcm_host.VCOS_ALIGN_UP(h, 16) >> 4 + if w * h > macroblocks_limit: + raise PiCameraValueError( + 'output resolution %s exceeds macroblock limit (%d) for ' + 'the selected H.264 profile and level' % + (self.output_port.framesize, macroblocks_limit)) + if self.parent: + if self.parent.framerate == 0: + # Take the upper limit of the framerate range as we're + # only interested in the macroblock limit. No need to + # bother with framerate delta here as it doesn't work with + # range + framerate = self.parent.framerate_range[1] + else: + framerate = ( + self.parent.framerate + self.parent.framerate_delta) + else: + framerate = self.input_port.framerate + if w * h * framerate > macroblocks_per_s_limit: + raise PiCameraValueError( + 'output resolution and framerate exceeds macroblocks/s ' + 'limit (%d) for the selected H.264 profile and ' + 'level' % macroblocks_per_s_limit) + + mp = mmal.MMAL_PARAMETER_VIDEO_PROFILE_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_PROFILE, + ct.sizeof(mmal.MMAL_PARAMETER_VIDEO_PROFILE_T), + ), + ) + mp.profile[0].profile = profile + mp.profile[0].level = level + self.output_port.params[mmal.MMAL_PARAMETER_PROFILE] = mp + + if inline_headers: + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER] = True + if sei: + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE] = True + if sps_timing: + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING] = True + if motion_output is not None: + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS] = True + + # We need the intra-period to calculate the SPS header timeout in + # the split method below. 
If one is not set explicitly, query the + # encoder's default + if intra_period is not None: + self.output_port.params[mmal.MMAL_PARAMETER_INTRAPERIOD] = intra_period + self._intra_period = intra_period + else: + self._intra_period = self.output_port.params[mmal.MMAL_PARAMETER_INTRAPERIOD] + + if intra_refresh is not None: + # Get the intra-refresh structure first as there are several + # other fields in it which we don't wish to overwrite + mp = self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH] + try: + mp.refresh_mode = { + 'cyclic': mmal.MMAL_VIDEO_INTRA_REFRESH_CYCLIC, + 'adaptive': mmal.MMAL_VIDEO_INTRA_REFRESH_ADAPTIVE, + 'both': mmal.MMAL_VIDEO_INTRA_REFRESH_BOTH, + 'cyclicrows': mmal.MMAL_VIDEO_INTRA_REFRESH_CYCLIC_MROWS, + }[intra_refresh] + except KeyError: + raise PiCameraValueError( + "Invalid intra_refresh %s" % intra_refresh) + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH] = mp + + elif format == 'mjpeg': + self.output_port.bitrate = bitrate + self.output_port.commit() + # MJPEG doesn't have an intra_period setting as such, but as every + # frame is a full-frame, the intra_period is effectively 1 + self._intra_period = 1 + + if quality: + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT] = quality + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT] = quality + self.output_port.params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT] = quality + + self.encoder.inputs[0].params[mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT] = True + self.encoder.enable() + + def start(self, output, motion_output=None): + """ + Extended to initialize video frame meta-data tracking. + """ + self.frame = PiVideoFrame( + index=0, + frame_type=None, + frame_size=0, + video_size=0, + split_size=0, + timestamp=0, + complete=False, + ) + if motion_output is not None: + self._open_output(motion_output, PiVideoFrameType.motion_data) + super(PiVideoEncoder, self).start(output) + + def stop(self): + super(PiVideoEncoder, self).stop() + self._close_output(PiVideoFrameType.motion_data) + + def request_key_frame(self): + """ + Called to request an I-frame from the encoder. + + This method is called by :meth:`~PiCamera.request_key_frame` and + :meth:`split` to force the encoder to output an I-frame as soon as + possible. + """ + self.encoder.control.params[mmal.MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME] = True + + def split(self, output, motion_output=None): + """ + Called to switch the encoder's output. + + This method is called by :meth:`~PiCamera.split_recording` and + :meth:`~PiCamera.record_sequence` to switch the encoder's + :attr:`output` object to the *output* parameter (which can be a + filename or a file-like object, as with :meth:`start`). + """ + with self.outputs_lock: + outputs = {} + if output is not None: + outputs[PiVideoFrameType.frame] = output + if motion_output is not None: + outputs[PiVideoFrameType.motion_data] = motion_output + self._next_output.append(outputs) + # intra_period / framerate gives the time between I-frames (which + # should also coincide with SPS headers). 
We multiply by three to + # ensure the timeout is deliberately excessive, and clamp the minimum + # timeout to 15 seconds (otherwise unencoded formats tend to fail + # presumably due to I/O capacity) + if self.parent: + framerate = self.parent.framerate + self.parent.framerate_delta + else: + framerate = self.input_port.framerate + timeout = max(15.0, float(self._intra_period / framerate) * 3.0) + if self._intra_period > 1: + self.request_key_frame() + if not self.event.wait(timeout): + raise PiCameraRuntimeError('Timed out waiting for a split point') + self.event.clear() + return self._split_frame + + def _callback_write(self, buf, key=PiVideoFrameType.frame): + """ + Extended to implement video frame meta-data tracking, and to handle + splitting video recording to the next output when :meth:`split` is + called. + """ + last_frame = self.frame + this_frame = PiVideoFrame( + index= + last_frame.index + 1 + if last_frame.complete else + last_frame.index, + frame_type= + PiVideoFrameType.key_frame + if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else + PiVideoFrameType.sps_header + if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else + PiVideoFrameType.motion_data + if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else + PiVideoFrameType.frame, + frame_size= + buf.length + if last_frame.complete else + last_frame.frame_size + buf.length, + video_size= + last_frame.video_size + if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else + last_frame.video_size + buf.length, + split_size= + last_frame.split_size + if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else + last_frame.split_size + buf.length, + timestamp= + # Time cannot go backwards, so if we've got an unknown pts + # simply repeat the last one + last_frame.timestamp + if buf.pts in (0, mmal.MMAL_TIME_UNKNOWN) else + buf.pts, + complete= + bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END) + ) + if self._intra_period == 1 or (buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG): + with self.outputs_lock: + try: + new_outputs = self._next_output.pop(0) + except IndexError: + new_outputs = None + if new_outputs: + for new_key, new_output in new_outputs.items(): + self._close_output(new_key) + self._open_output(new_output, new_key) + if new_key == PiVideoFrameType.frame: + this_frame = PiVideoFrame( + index=this_frame.index, + frame_type=this_frame.frame_type, + frame_size=this_frame.frame_size, + video_size=this_frame.video_size, + split_size=0, + timestamp=this_frame.timestamp, + complete=this_frame.complete, + ) + self._split_frame = this_frame + self.event.set() + if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO: + key = PiVideoFrameType.motion_data + self.frame = this_frame + return super(PiVideoEncoder, self)._callback_write(buf, key) + + +class PiCookedVideoEncoder(PiVideoEncoder): + """ + Video encoder for encoded recordings. + + This class is a derivative of :class:`PiVideoEncoder` and only exists to + provide naming symmetry with the image encoder classes. + """ + + +class PiRawVideoEncoder(PiRawMixin, PiVideoEncoder): + """ + Video encoder for unencoded recordings. + + This class is a derivative of :class:`PiVideoEncoder` and the + :class:`PiRawMixin` class intended for use with + :meth:`~PiCamera.start_recording` when it is called with an unencoded + format. + + .. warning:: + + This class creates an inheritance diamond. Take care to determine the + MRO of super-class calls. 
+ """ + + def _create_encoder(self, format): + super(PiRawVideoEncoder, self)._create_encoder(format) + # Raw formats don't have an intra_period setting as such, but as every + # frame is a full-frame, the intra_period is effectively 1 + self._intra_period = 1 + + +class PiImageEncoder(PiEncoder): + """ + Encoder for image capture. + + This derivative of :class:`PiEncoder` extends the :meth:`_create_encoder` + method to configure the encoder for a variety of encoded image outputs + (JPEG, PNG, etc.). + """ + + encoder_type = mo.MMALImageEncoder + + def _create_encoder( + self, format, quality=85, thumbnail=(64, 48, 35), restart=0): + """ + Extends the base :meth:`~PiEncoder._create_encoder` implementation to + configure the image encoder for JPEG, PNG, etc. + """ + super(PiImageEncoder, self)._create_encoder(format) + + try: + self.output_port.format = { + 'jpeg': mmal.MMAL_ENCODING_JPEG, + 'png': mmal.MMAL_ENCODING_PNG, + 'gif': mmal.MMAL_ENCODING_GIF, + 'bmp': mmal.MMAL_ENCODING_BMP, + }[format] + except KeyError: + raise PiCameraValueError("Unsupported format %s" % format) + self.output_port.commit() + + if format == 'jpeg': + self.output_port.params[mmal.MMAL_PARAMETER_JPEG_Q_FACTOR] = quality + if restart > 0: + # Don't set if zero as old firmwares don't support this param + self.output_port.params[mmal.MMAL_PARAMETER_JPEG_RESTART_INTERVAL] = restart + if thumbnail is None: + mp = mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION, + ct.sizeof(mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T) + ), + 0, 0, 0, 0) + else: + mp = mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T( + mmal.MMAL_PARAMETER_HEADER_T( + mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION, + ct.sizeof(mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T) + ), + 1, *thumbnail) + self.encoder.control.params[mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION] = mp + + self.encoder.enable() + + +class PiOneImageEncoder(PiImageEncoder): + """ + Encoder for single image capture. + + This class simply extends :meth:`~PiEncoder._callback_write` to terminate + capture at frame end (i.e. after a single frame has been received). + """ + + def _callback_write(self, buf, key=PiVideoFrameType.frame): + return ( + super(PiOneImageEncoder, self)._callback_write(buf, key) + ) or bool( + buf.flags & ( + mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END | + mmal.MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED) + ) + + +class PiMultiImageEncoder(PiImageEncoder): + """ + Encoder for multiple image capture. + + This class extends :class:`PiImageEncoder` to handle an iterable of outputs + instead of a single output. The :meth:`~PiEncoder._callback_write` method + is extended to terminate capture when the iterable is exhausted, while + :meth:`PiEncoder._open_output` is overridden to begin iteration and rely + on the new :meth:`_next_output` method to advance output to the next item + in the iterable. + """ + + def _open_output(self, outputs, key=PiVideoFrameType.frame): + self._output_iter = iter(outputs) + self._next_output(key) + + def _next_output(self, key=PiVideoFrameType.frame): + """ + This method moves output to the next item from the iterable passed to + :meth:`~PiEncoder.start`. 
+ """ + self._close_output(key) + super(PiMultiImageEncoder, self)._open_output(next(self._output_iter), key) + + def _callback_write(self, buf, key=PiVideoFrameType.frame): + try: + if ( + super(PiMultiImageEncoder, self)._callback_write(buf, key) + ) or bool( + buf.flags & ( + mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END | + mmal.MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED) + ): + self._next_output(key) + return False + except StopIteration: + return True + + +class PiCookedOneImageEncoder(PiOneImageEncoder): + """ + Encoder for "cooked" (encoded) single image output. + + This encoder extends :class:`PiOneImageEncoder` to include Exif tags in the + output. + """ + + exif_encoding = 'ascii' + + def __init__( + self, parent, camera_port, input_port, format, resize, **options): + super(PiCookedOneImageEncoder, self).__init__( + parent, camera_port, input_port, format, resize, **options) + if parent: + self.exif_tags = self.parent.exif_tags + else: + self.exif_tags = {} + + def _add_exif_tag(self, tag, value): + # Format the tag and value into an appropriate bytes string, encoded + # with the Exif encoding (ASCII) + if isinstance(tag, str): + tag = tag.encode(self.exif_encoding) + if isinstance(value, str): + value = value.encode(self.exif_encoding) + elif isinstance(value, datetime.datetime): + value = value.strftime('%Y:%m:%d %H:%M:%S').encode(self.exif_encoding) + # MMAL_PARAMETER_EXIF_T is a variable sized structure, hence all the + # mucking about with string buffers here... + buf = ct.create_string_buffer( + ct.sizeof(mmal.MMAL_PARAMETER_EXIF_T) + len(tag) + len(value) + 1) + mp = ct.cast(buf, ct.POINTER(mmal.MMAL_PARAMETER_EXIF_T)) + mp[0].hdr.id = mmal.MMAL_PARAMETER_EXIF + mp[0].hdr.size = len(buf) + if (b'=' in tag or b'\x00' in value): + data = tag + value + mp[0].keylen = len(tag) + mp[0].value_offset = len(tag) + mp[0].valuelen = len(value) + else: + data = tag + b'=' + value + ct.memmove(mp[0].data, data, len(data)) + self.output_port.params[mmal.MMAL_PARAMETER_EXIF] = mp[0] + + def start(self, output): + timestamp = datetime.datetime.now() + timestamp_tags = ( + 'EXIF.DateTimeDigitized', + 'EXIF.DateTimeOriginal', + 'IFD0.DateTime') + # Timestamp tags are always included with the value calculated + # above, but the user may choose to override the value in the + # exif_tags mapping + for tag in timestamp_tags: + self._add_exif_tag(tag, self.exif_tags.get(tag, timestamp)) + # All other tags are just copied in verbatim + for tag, value in self.exif_tags.items(): + if not tag in timestamp_tags: + self._add_exif_tag(tag, value) + super(PiCookedOneImageEncoder, self).start(output) + + +class PiCookedMultiImageEncoder(PiMultiImageEncoder): + """ + Encoder for "cooked" (encoded) multiple image output. + + This encoder descends from :class:`PiMultiImageEncoder` but includes no + new functionality as video-port based encodes (which is all this class + is used for) don't support Exif tag output. + """ + pass + + +class PiRawImageMixin(PiRawMixin, PiImageEncoder): + """ + Mixin class for "raw" (unencoded) image capture. + + The :meth:`_callback_write` method is overridden to manually calculate when + to terminate output. 
+ """ + + def __init__( + self, parent, camera_port, input_port, format, resize, **options): + super(PiRawImageMixin, self).__init__( + parent, camera_port, input_port, format, resize, **options) + self._image_size = 0 + + def _callback_write(self, buf, key=PiVideoFrameType.frame): + """ + Overridden to manually calculate when to terminate capture (see + comments in :meth:`__init__`). + """ + if self._image_size > 0: + super(PiRawImageMixin, self)._callback_write(buf, key) + self._image_size -= buf.length + return self._image_size <= 0 + + def start(self, output): + self._image_size = self._frame_size + super(PiRawImageMixin, self).start(output) + + +class PiRawOneImageEncoder(PiOneImageEncoder, PiRawImageMixin): + """ + Single image encoder for unencoded capture. + + This class is a derivative of :class:`PiOneImageEncoder` and the + :class:`PiRawImageMixin` class intended for use with + :meth:`~PiCamera.capture` (et al) when it is called with an unencoded image + format. + + .. warning:: + + This class creates an inheritance diamond. Take care to determine the + MRO of super-class calls. + """ + pass + + +class PiRawMultiImageEncoder(PiMultiImageEncoder, PiRawImageMixin): + """ + Multiple image encoder for unencoded capture. + + This class is a derivative of :class:`PiMultiImageEncoder` and the + :class:`PiRawImageMixin` class intended for use with + :meth:`~PiCamera.capture_sequence` when it is called with an unencoded + image format. + + .. warning:: + + This class creates an inheritance diamond. Take care to determine the + MRO of super-class calls. + """ + def _next_output(self, key=PiVideoFrameType.frame): + super(PiRawMultiImageEncoder, self)._next_output(key) + self._image_size = self._frame_size + diff --git a/picamera/exc.py b/picamera/exc.py new file mode 100644 index 0000000..9efe30d --- /dev/null +++ b/picamera/exc.py @@ -0,0 +1,185 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + + +import picamera.mmal as mmal + + +class PiCameraWarning(Warning): + """ + Base class for PiCamera warnings. + """ + + +class PiCameraDeprecated(PiCameraWarning, DeprecationWarning): + """ + Raised when deprecated functionality in picamera is used. + """ + + +class PiCameraFallback(PiCameraWarning, RuntimeWarning): + """ + Raised when picamera has to fallback on old functionality. + """ + + +class PiCameraResizerEncoding(PiCameraWarning, RuntimeWarning): + """ + Raised when picamera uses a resizer purely for encoding purposes. + """ + + +class PiCameraAlphaStripping(PiCameraWarning, RuntimeWarning): + """ + Raised when picamera does alpha-byte stripping. + """ + + +class PiCameraResolutionRounded(PiCameraWarning, RuntimeWarning): + """ + Raised when picamera has to round a requested frame size upward. + """ + + +class PiCameraError(Exception): + """ + Base class for PiCamera errors. + """ + + +class PiCameraRuntimeError(PiCameraError, RuntimeError): + """ + Raised when an invalid sequence of operations is attempted with a + :class:`PiCamera` object. + """ + + +class PiCameraClosed(PiCameraRuntimeError): + """ + Raised when a method is called on a camera which has already been closed. + """ + + +class PiCameraNotRecording(PiCameraRuntimeError): + """ + Raised when :meth:`~PiCamera.stop_recording` or + :meth:`~PiCamera.split_recording` are called against a port which has no + recording active. + """ + + +class PiCameraAlreadyRecording(PiCameraRuntimeError): + """ + Raised when :meth:`~PiCamera.start_recording` or + :meth:`~PiCamera.record_sequence` are called against a port which already + has an active recording. + """ + + +class PiCameraValueError(PiCameraError, ValueError): + """ + Raised when an invalid value is fed to a :class:`~PiCamera` object. + """ + + +class PiCameraIOError(PiCameraError, IOError): + """ + Raised when a :class:`~PiCamera` object is unable to perform an IO + operation. + """ + + +class PiCameraMMALError(PiCameraError): + """ + Raised when an MMAL operation fails for whatever reason. + """ + def __init__(self, status, prefix=""): + self.status = status + PiCameraError.__init__(self, "%s%s%s" % (prefix, ": " if prefix else "", { + mmal.MMAL_ENOMEM: "Out of memory", + mmal.MMAL_ENOSPC: "Out of resources", + mmal.MMAL_EINVAL: "Argument is invalid", + mmal.MMAL_ENOSYS: "Function not implemented", + mmal.MMAL_ENOENT: "No such file or directory", + mmal.MMAL_ENXIO: "No such device or address", + mmal.MMAL_EIO: "I/O error", + mmal.MMAL_ESPIPE: "Illegal seek", + mmal.MMAL_ECORRUPT: "Data is corrupt #FIXME not POSIX", + mmal.MMAL_ENOTREADY: "Component is not ready #FIXME not POSIX", + mmal.MMAL_ECONFIG: "Component is not configured #FIXME not POSIX", + mmal.MMAL_EISCONN: "Port is already connected", + mmal.MMAL_ENOTCONN: "Port is disconnected", + mmal.MMAL_EAGAIN: "Resource temporarily unavailable; try again later", + mmal.MMAL_EFAULT: "Bad address", + }.get(status, "Unknown status error"))) + + +class PiCameraPortDisabled(PiCameraMMALError): + """ + Raised when attempting a buffer operation on a disabled port. + + This exception is intended for the common use-case of attempting to get + or send a buffer just when a component is shutting down (e.g. at script + teardown) and simplifies the trivial response (ignore the error and shut + down quietly). 
For example:: + + def _callback(self, port, buf): + try: + buf = self.outputs[0].get_buffer(False) + except PiCameraPortDisabled: + return True # shutting down + # ... + """ + def __init__(self, msg): + super(PiCameraPortDisabled, self).__init__(mmal.MMAL_EINVAL, msg) + + +def mmal_check(status, prefix=""): + """ + Checks the return status of an mmal call and raises an exception on + failure. + + The *status* parameter is the result of an MMAL call. If *status* is + anything other than MMAL_SUCCESS, a :exc:`PiCameraMMALError` exception is + raised. The optional *prefix* parameter specifies a prefix message to place + at the start of the exception's message to provide some context. + """ + if status != mmal.MMAL_SUCCESS: + raise PiCameraMMALError(status, prefix) + diff --git a/picamera/frames.py b/picamera/frames.py new file mode 100644 index 0000000..7a1ee99 --- /dev/null +++ b/picamera/frames.py @@ -0,0 +1,214 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str and range equivalent to Py3's +str = type('') + +import warnings +from collections import namedtuple + +from picamera.exc import ( + mmal_check, + PiCameraError, + PiCameraMMALError, + PiCameraValueError, + PiCameraRuntimeError, + PiCameraDeprecated, + ) + + +class PiVideoFrameType(object): + """ + This class simply defines constants used to represent the type of a frame + in :attr:`PiVideoFrame.frame_type`. Effectively it is a namespace for an + enum. + + .. attribute:: frame + + Indicates a predicted frame (P-frame). This is the most common frame + type. + + .. attribute:: key_frame + + Indicates an intra-frame (I-frame) also known as a key frame. + + .. attribute:: sps_header + + Indicates an inline SPS/PPS header (rather than picture data) which is + typically used as a split point. + + .. 
attribute:: motion_data + + Indicates the frame is inline motion vector data, rather than picture + data. + + .. versionadded:: 1.5 + """ + frame = 0 + key_frame = 1 + sps_header = 2 + motion_data = 3 + + +class PiVideoFrame(namedtuple('PiVideoFrame', ( + 'index', # the frame number, where the first frame is 0 + 'frame_type', # a constant indicating the frame type (see PiVideoFrameType) + 'frame_size', # the size (in bytes) of the frame's data + 'video_size', # the size (in bytes) of the video so far + 'split_size', # the size (in bytes) of the video since the last split + 'timestamp', # the presentation timestamp (PTS) of the frame + 'complete', # whether the frame is complete or not + ))): + """ + This class is a :func:`~collections.namedtuple` derivative used to store + information about a video frame. It is recommended that you access the + information stored by this class by attribute name rather than position + (for example: ``frame.index`` rather than ``frame[0]``). + + .. attribute:: index + + Returns the zero-based number of the frame. This is a monotonic counter + that is simply incremented every time the camera starts outputting a + new frame. As a consequence, this attribute cannot be used to detect + dropped frames. Nor does it necessarily represent actual frames; it + will be incremented for SPS headers and motion data buffers too. + + .. attribute:: frame_type + + Returns a constant indicating the kind of data that the frame contains + (see :class:`PiVideoFrameType`). Please note that certain frame types + contain no image data at all. + + .. attribute:: frame_size + + Returns the size in bytes of the current frame. If a frame is written + in multiple chunks, this value will increment while :attr:`index` + remains static. Query :attr:`complete` to determine whether the frame + has been completely output yet. + + .. attribute:: video_size + + Returns the size in bytes of the entire video up to this frame. Note + that this is unlikely to match the size of the actual file/stream + written so far. This is because a stream may utilize buffering which + will cause the actual amount written (e.g. to disk) to lag behind the + value reported by this attribute. + + .. attribute:: split_size + + Returns the size in bytes of the video recorded since the last call to + either :meth:`~PiCamera.start_recording` or + :meth:`~PiCamera.split_recording`. For the reasons explained above, + this may differ from the size of the actual file/stream written so far. + + .. attribute:: timestamp + + Returns the presentation timestamp (PTS) of the frame. This represents + the point in time that the Pi received the first line of the frame from + the camera. + + The timestamp is measured in microseconds (millionths of a second). + When the camera's clock mode is ``'reset'`` (the default), the + timestamp is relative to the start of the video recording. When the + camera's :attr:`~PiCamera.clock_mode` is ``'raw'``, it is relative to + the last system reboot. See :attr:`~PiCamera.timestamp` for more + information. + + .. warning:: + + Currently, the camera occasionally returns "time unknown" values in + this field. In this case, picamera will simply re-use the timestamp + of the previous frame (under the assumption that time never goes + backwards). This happens for SPS header "frames", for example. + + .. attribute:: complete + + Returns a bool indicating whether the current frame is complete or not. 
+ If the frame is complete then :attr:`frame_size` will not increment + any further, and will reset for the next frame. + + .. versionchanged:: 1.5 + Deprecated :attr:`header` and :attr:`keyframe` attributes and added the + new :attr:`frame_type` attribute instead. + + .. versionchanged:: 1.9 + Added the :attr:`complete` attribute. + """ + + __slots__ = () # workaround python issue #24931 + + @property + def position(self): + """ + Returns the zero-based position of the frame in the stream containing + it. + """ + return self.split_size - self.frame_size + + @property + def keyframe(self): + """ + Returns a bool indicating whether the current frame is a keyframe (an + intra-frame, or I-frame in MPEG parlance). + + .. deprecated:: 1.5 + Please compare :attr:`frame_type` to + :attr:`PiVideoFrameType.key_frame` instead. + """ + warnings.warn( + PiCameraDeprecated( + 'PiVideoFrame.keyframe is deprecated; please check ' + 'PiVideoFrame.frame_type for equality with ' + 'PiVideoFrameType.key_frame instead')) + return self.frame_type == PiVideoFrameType.key_frame + + @property + def header(self): + """ + Contains a bool indicating whether the current frame is actually an + SPS/PPS header. Typically it is best to split an H.264 stream so that + it starts with an SPS/PPS header. + + .. deprecated:: 1.5 + Please compare :attr:`frame_type` to + :attr:`PiVideoFrameType.sps_header` instead. + """ + warnings.warn( + PiCameraDeprecated( + 'PiVideoFrame.header is deprecated; please check ' + 'PiVideoFrame.frame_type for equality with ' + 'PiVideoFrameType.sps_header instead')) + return self.frame_type == PiVideoFrameType.sps_header diff --git a/picamera/mmal.py b/picamera/mmal.py new file mode 100644 index 0000000..82b2b5f --- /dev/null +++ b/picamera/mmal.py @@ -0,0 +1,2481 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python header conversion +# Copyright (c) 2013-2017 Dave Jones +# +# Original headers +# Copyright (c) 2012, Broadcom Europe Ltd +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
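+
+# Editor's note: everything below is a direct ctypes translation of the
+# Broadcom MMAL headers; nothing here validates arguments or wraps errors.
+# As a minimal, illustrative sketch only (not part of the upstream module),
+# low-level use of these declarations looks roughly like this -- create a
+# component by name, then enable it, checking each returned status:
+#
+#     camera = ct.POINTER(MMAL_COMPONENT_T)()
+#     status = mmal_component_create(b'vc.ril.camera', camera)
+#     if status != MMAL_SUCCESS:
+#         raise RuntimeError('failed to create camera component')
+#     if mmal_component_enable(camera) != MMAL_SUCCESS:
+#         raise RuntimeError('failed to enable camera component')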
+
+from __future__ import (
+    unicode_literals,
+    print_function,
+    division,
+    absolute_import,
+    )
+
+# Make Py2's str equivalent to Py3's
+str = type('')
+
+import ctypes as ct
+import warnings
+
+from .bcm_host import VCOS_UNSIGNED
+
+_lib = ct.CDLL('libmmal.so')
+
+# mmal.h #####################################################################
+
+MMAL_VERSION_MAJOR = 0
+MMAL_VERSION_MINOR = 1
+MMAL_VERSION = (MMAL_VERSION_MAJOR << 16 | MMAL_VERSION_MINOR)
+
+def MMAL_VERSION_TO_MAJOR(a):
+    return a >> 16
+
+def MMAL_VERSION_TO_MINOR(a):
+    return a & 0xFFFF
+
+# mmal_common.h ##############################################################
+
+def MMAL_FOURCC(s):
+    return sum(ord(c) << (i * 8) for (i, c) in enumerate(s))
+
+def FOURCC_str(n):
+    if n == 0:
+        return '\\0'
+    else:
+        return ''.join(chr(n >> i & 0xFF) for i in range(0, 32, 8))
+
+MMAL_MAGIC = MMAL_FOURCC('mmal')
+
+MMAL_FALSE = 0
+MMAL_TRUE = 1
+
+class MMAL_BOOL_T(ct.c_int32):
+    # This only exists to ensure we've got a distinct type to ct.c_int32
+    # for mmalobj to perform dict-lookups against
+    def __str__(self):
+        return ['MMAL_FALSE', 'MMAL_TRUE'][bool(self.value)]
+
+    def __repr__(self):
+        return str(self)
+
+
+class MMAL_CORE_STATISTICS_T(ct.Structure):
+    _fields_ = [
+        ('buffer_count', ct.c_uint32),
+        ('first_buffer_time', ct.c_uint32),
+        ('last_buffer_time', ct.c_uint32),
+        ('max_delay', ct.c_uint32),
+        ]
+
+class MMAL_CORE_PORT_STATISTICS_T(ct.Structure):
+    _fields_ = [
+        ('rx', MMAL_CORE_STATISTICS_T),
+        ('tx', MMAL_CORE_STATISTICS_T),
+        ]
+
+MMAL_FIXED_16_16_T = ct.c_uint32
+
+# mmal_types.h ###############################################################
+
+MMAL_STATUS_T = ct.c_uint32 # enum
+(
+    MMAL_SUCCESS,
+    MMAL_ENOMEM,
+    MMAL_ENOSPC,
+    MMAL_EINVAL,
+    MMAL_ENOSYS,
+    MMAL_ENOENT,
+    MMAL_ENXIO,
+    MMAL_EIO,
+    MMAL_ESPIPE,
+    MMAL_ECORRUPT,
+    MMAL_ENOTREADY,
+    MMAL_ECONFIG,
+    MMAL_EISCONN,
+    MMAL_ENOTCONN,
+    MMAL_EAGAIN,
+    MMAL_EFAULT,
+) = range(16)
+MMAL_STATUS_MAX = 0x7FFFFFFF
+
+class MMAL_RECT_T(ct.Structure):
+    _fields_ = [
+        ('x', ct.c_int32),
+        ('y', ct.c_int32),
+        ('width', ct.c_int32),
+        ('height', ct.c_int32),
+        ]
+
+    def __repr__(self):
+        return '(%d, %d)->(%d, %d)' % (
+            self.x, self.y, self.x + self.width, self.y + self.height)
+
+class MMAL_RATIONAL_T(ct.Structure):
+    _fields_ = [
+        ('num', ct.c_int32),
+        ('den', ct.c_int32),
+        ]
+
+    def __repr__(self):
+        return '%d/%d' % (self.num, self.den)
+
+MMAL_TIME_UNKNOWN = ct.c_int64(1<<63).value
+
+MMAL_FOURCC_T = ct.c_uint32
+
+# mmal_format.h ##############################################################
+
+MMAL_ES_TYPE_T = ct.c_uint32 # enum
+(
+    MMAL_ES_TYPE_UNKNOWN,
+    MMAL_ES_TYPE_CONTROL,
+    MMAL_ES_TYPE_AUDIO,
+    MMAL_ES_TYPE_VIDEO,
+    MMAL_ES_TYPE_SUBPICTURE,
+) = range(5)
+
+class MMAL_VIDEO_FORMAT_T(ct.Structure):
+    _fields_ = [
+        ('width', ct.c_uint32),
+        ('height', ct.c_uint32),
+        ('crop', MMAL_RECT_T),
+        ('frame_rate', MMAL_RATIONAL_T),
+        ('par', MMAL_RATIONAL_T),
+        ('color_space', MMAL_FOURCC_T),
+        ]
+
+    def __repr__(self):
+        return '<MMAL_VIDEO_FORMAT_T width=%d height=%d crop=%r frame_rate=%r par=%r color_space=%r>' % (
+            self.width, self.height, self.crop, self.frame_rate, self.par, self.color_space)
+
+class MMAL_AUDIO_FORMAT_T(ct.Structure):
+    _fields_ = [
+        ('channels', ct.c_uint32),
+        ('sample_rate', ct.c_uint32),
+        ('bits_per_sample', ct.c_uint32),
+        ('block_align', ct.c_uint32),
+        ]
+
+    def __repr__(self):
+        return '<MMAL_AUDIO_FORMAT_T channels=%d sample_rate=%d bits_per_sample=%d block_align=%d>' % (
+            self.channels, self.sample_rate, self.bits_per_sample, self.block_align)
+
+class MMAL_SUBPICTURE_FORMAT_T(ct.Structure):
+    _fields_ = [
+        ('x_offset', ct.c_uint32),
+        ('y_offset', ct.c_uint32),
+        ]
+
+ def __repr__(self): + return '' % ( + self.x_offset, self.y_offset) + +class MMAL_ES_SPECIFIC_FORMAT_T(ct.Union): + _fields_ = [ + ('audio', MMAL_AUDIO_FORMAT_T), + ('video', MMAL_VIDEO_FORMAT_T), + ('subpicture', MMAL_SUBPICTURE_FORMAT_T), + ] + +MMAL_ES_FORMAT_FLAG_FRAMED = 0x01 +MMAL_ENCODING_UNKNOWN = 0 +MMAL_ENCODING_VARIANT_DEFAULT = 0 + +class MMAL_ES_FORMAT_T(ct.Structure): + _fields_ = [ + ('type', MMAL_ES_TYPE_T), + ('encoding', MMAL_FOURCC_T), + ('encoding_variant', MMAL_FOURCC_T), + ('es', ct.POINTER(MMAL_ES_SPECIFIC_FORMAT_T)), + ('bitrate', ct.c_uint32), + ('flags', ct.c_uint32), + ('extradata_size', ct.c_uint32), + ('extradata', ct.POINTER(ct.c_uint8)), + ] + + def __repr__(self): + return '' % (self.type, self.encoding) + +mmal_format_alloc = _lib.mmal_format_alloc +mmal_format_alloc.argtypes = [] +mmal_format_alloc.restype = ct.POINTER(MMAL_ES_FORMAT_T) + +mmal_format_free = _lib.mmal_format_free +mmal_format_free.argtypes = [ct.POINTER(MMAL_ES_FORMAT_T)] +mmal_format_free.restype = None + +mmal_format_extradata_alloc = _lib.mmal_format_extradata_alloc +mmal_format_extradata_alloc.argtypes = [ct.POINTER(MMAL_ES_FORMAT_T), ct.c_uint] +mmal_format_extradata_alloc.restype = MMAL_STATUS_T + +mmal_format_copy = _lib.mmal_format_copy +mmal_format_copy.argtypes = [ct.POINTER(MMAL_ES_FORMAT_T), ct.POINTER(MMAL_ES_FORMAT_T)] +mmal_format_copy.restype = None + +mmal_format_full_copy = _lib.mmal_format_full_copy +mmal_format_full_copy.argtypes = [ct.POINTER(MMAL_ES_FORMAT_T), ct.POINTER(MMAL_ES_FORMAT_T)] +mmal_format_full_copy.restype = MMAL_STATUS_T + +MMAL_ES_FORMAT_COMPARE_FLAG_TYPE = 0x01 +MMAL_ES_FORMAT_COMPARE_FLAG_ENCODING = 0x02 +MMAL_ES_FORMAT_COMPARE_FLAG_BITRATE = 0x04 +MMAL_ES_FORMAT_COMPARE_FLAG_FLAGS = 0x08 +MMAL_ES_FORMAT_COMPARE_FLAG_EXTRADATA = 0x10 + +MMAL_ES_FORMAT_COMPARE_FLAG_VIDEO_RESOLUTION = 0x0100 +MMAL_ES_FORMAT_COMPARE_FLAG_VIDEO_CROPPING = 0x0200 +MMAL_ES_FORMAT_COMPARE_FLAG_VIDEO_FRAME_RATE = 0x0400 +MMAL_ES_FORMAT_COMPARE_FLAG_VIDEO_ASPECT_RATIO = 0x0800 +MMAL_ES_FORMAT_COMPARE_FLAG_VIDEO_COLOR_SPACE = 0x1000 + +MMAL_ES_FORMAT_COMPARE_FLAG_ES_OTHER = 0x10000000 + +mmal_format_compare = _lib.mmal_format_compare +mmal_format_compare.argtypes = [ct.POINTER(MMAL_ES_FORMAT_T), ct.POINTER(MMAL_ES_FORMAT_T)] +mmal_format_compare.restype = ct.c_uint32 + +# mmal_buffer.h ############################################################## + +class MMAL_BUFFER_HEADER_VIDEO_SPECIFIC_T(ct.Structure): + _fields_ = [ + ('planes', ct.c_uint32), + ('offset', ct.c_uint32 * 4), + ('pitch', ct.c_uint32 * 4), + ('flags', ct.c_uint32), + ] + +class MMAL_BUFFER_HEADER_TYPE_SPECIFIC_T(ct.Union): + _fields_ = [ + ('video', MMAL_BUFFER_HEADER_VIDEO_SPECIFIC_T), + ] + +class MMAL_BUFFER_HEADER_PRIVATE_T(ct.Structure): + _fields_ = [] + +class MMAL_BUFFER_HEADER_T(ct.Structure): + pass + +MMAL_BUFFER_HEADER_T._fields_ = [ + ('next', ct.POINTER(MMAL_BUFFER_HEADER_T)), # self-reference + ('priv', ct.POINTER(MMAL_BUFFER_HEADER_PRIVATE_T)), + ('cmd', ct.c_uint32), + ('data', ct.POINTER(ct.c_uint8)), + ('alloc_size', ct.c_uint32), + ('length', ct.c_uint32), + ('offset', ct.c_uint32), + ('flags', ct.c_uint32), + ('pts', ct.c_int64), + ('dts', ct.c_int64), + ('type', ct.POINTER(MMAL_BUFFER_HEADER_TYPE_SPECIFIC_T)), + ('user_data', ct.c_void_p), + ] + +MMAL_BUFFER_HEADER_FLAG_EOS = (1<<0) +MMAL_BUFFER_HEADER_FLAG_FRAME_START = (1<<1) +MMAL_BUFFER_HEADER_FLAG_FRAME_END = (1<<2) +MMAL_BUFFER_HEADER_FLAG_FRAME = (MMAL_BUFFER_HEADER_FLAG_FRAME_START|MMAL_BUFFER_HEADER_FLAG_FRAME_END) 
+MMAL_BUFFER_HEADER_FLAG_KEYFRAME = (1<<3) +MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY = (1<<4) +MMAL_BUFFER_HEADER_FLAG_CONFIG = (1<<5) +MMAL_BUFFER_HEADER_FLAG_ENCRYPTED = (1<<6) +MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO = (1<<7) +MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT = (1<<8) +MMAL_BUFFER_HEADER_FLAG_CORRUPTED = (1<<9) +MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED = (1<<10) +MMAL_BUFFER_HEADER_FLAG_DECODEONLY = (1<<11) + +MMAL_BUFFER_HEADER_FLAG_FORMAT_SPECIFIC_START = (1<<16) +MMAL_BUFFER_HEADER_VIDEO_FLAG_INTERLACED = (MMAL_BUFFER_HEADER_FLAG_FORMAT_SPECIFIC_START<<0) +MMAL_BUFFER_HEADER_VIDEO_FLAG_TOP_FIELD_FIRST = (MMAL_BUFFER_HEADER_FLAG_FORMAT_SPECIFIC_START<<1) +MMAL_BUFFER_HEADER_VIDEO_FLAG_DISPLAY_EXTERNAL = (MMAL_BUFFER_HEADER_FLAG_FORMAT_SPECIFIC_START<<3) +MMAL_BUFFER_HEADER_VIDEO_FLAG_PROTECTED = (MMAL_BUFFER_HEADER_FLAG_FORMAT_SPECIFIC_START<<4) + +mmal_buffer_header_acquire = _lib.mmal_buffer_header_acquire +mmal_buffer_header_acquire.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_acquire.restype = None + +mmal_buffer_header_reset = _lib.mmal_buffer_header_reset +mmal_buffer_header_reset.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_reset.restype = None + +mmal_buffer_header_release = _lib.mmal_buffer_header_release +mmal_buffer_header_release.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_release.restype = None + +mmal_buffer_header_release_continue = _lib.mmal_buffer_header_release_continue +mmal_buffer_header_release_continue.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_release_continue.restype = None + +MMAL_BH_PRE_RELEASE_CB_T = ct.CFUNCTYPE( + MMAL_BOOL_T, + ct.POINTER(MMAL_BUFFER_HEADER_T), ct.c_void_p) + +mmal_buffer_header_pre_release_cb_set = _lib.mmal_buffer_header_pre_release_cb_set +mmal_buffer_header_pre_release_cb_set.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T), MMAL_BH_PRE_RELEASE_CB_T, ct.c_void_p] +mmal_buffer_header_pre_release_cb_set.restype = None + +mmal_buffer_header_replicate = _lib.mmal_buffer_header_replicate +mmal_buffer_header_replicate.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T), ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_replicate.restype = MMAL_STATUS_T + +mmal_buffer_header_mem_lock = _lib.mmal_buffer_header_mem_lock +mmal_buffer_header_mem_lock.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_mem_lock.restype = MMAL_STATUS_T + +mmal_buffer_header_mem_unlock = _lib.mmal_buffer_header_mem_unlock +mmal_buffer_header_mem_unlock.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_mem_unlock.restype = None + +# mmal_clock.h ############################################################### + +MMAL_CLOCK_EVENT_MAGIC = MMAL_FOURCC('CKLM') +MMAL_CLOCK_EVENT_REFERENCE = MMAL_FOURCC('CREF') +MMAL_CLOCK_EVENT_ACTIVE = MMAL_FOURCC('CACT') +MMAL_CLOCK_EVENT_SCALE = MMAL_FOURCC('CSCA') +MMAL_CLOCK_EVENT_TIME = MMAL_FOURCC('CTIM') +MMAL_CLOCK_EVENT_UPDATE_THRESHOLD = MMAL_FOURCC('CUTH') +MMAL_CLOCK_EVENT_DISCONT_THRESHOLD = MMAL_FOURCC('CDTH') +MMAL_CLOCK_EVENT_REQUEST_THRESHOLD = MMAL_FOURCC('CRTH') +MMAL_CLOCK_EVENT_INPUT_BUFFER_INFO = MMAL_FOURCC('CIBI') +MMAL_CLOCK_EVENT_OUTPUT_BUFFER_INFO = MMAL_FOURCC('COBI') +MMAL_CLOCK_EVENT_LATENCY = MMAL_FOURCC('CLAT') +MMAL_CLOCK_EVENT_INVALID = 0 + +class MMAL_CLOCK_UPDATE_THRESHOLD_T(ct.Structure): + _fields_ = [ + ('threshold_lower', ct.c_int64), + ('threshold_upper', ct.c_int64), + ] + +class MMAL_CLOCK_DISCONT_THRESHOLD_T(ct.Structure): + _fields_ = [ + ('threshold', ct.c_int64), + ('duration', 
ct.c_int64), + ] + +class MMAL_CLOCK_REQUEST_THRESHOLD_T(ct.Structure): + _fields_ = [ + ('threshold', ct.c_int64), + ('threshold_enable', MMAL_BOOL_T), + ] + +class MMAL_CLOCK_BUFFER_INFO_T(ct.Structure): + _fields_ = [ + ('time_stamp', ct.c_int64), + ('arrival_time', ct.c_uint32), + ] + +class MMAL_CLOCK_LATENCY_T(ct.Structure): + _fields_ = [ + ('target', ct.c_int64), + ('attack_period', ct.c_int64), + ('attack_rate', ct.c_int64), + ] + +class _MMAL_CLOCK_EVENT_DATA(ct.Union): + _fields_ = [ + ('enable', MMAL_BOOL_T), + ('scale', MMAL_RATIONAL_T), + ('media_time', ct.c_int64), + ('update_threshold', MMAL_CLOCK_UPDATE_THRESHOLD_T), + ('discont_threshold', MMAL_CLOCK_DISCONT_THRESHOLD_T), + ('request_threshold', MMAL_CLOCK_REQUEST_THRESHOLD_T), + ('buffer', MMAL_CLOCK_BUFFER_INFO_T), + ('latency', MMAL_CLOCK_LATENCY_T), + ] + +class MMAL_CLOCK_EVENT_T(ct.Structure): + _fields_ = [ + ('id', ct.c_uint32), + ('magic', ct.c_uint32), + ('buffer', ct.POINTER(MMAL_BUFFER_HEADER_T)), + ('padding0', ct.c_uint32), + ('data', _MMAL_CLOCK_EVENT_DATA), + ('padding1', ct.c_uint64), + ] + +# Ensure MMAL_CLOCK_EVENT_T preserves 64-bit alignment +assert not ct.sizeof(MMAL_CLOCK_EVENT_T) & 0x07 + +def MMAL_CLOCK_EVENT_INIT(i): + return MMAL_CLOCK_EVENT_T( + id=i, + magic=MMAL_CLOCK_EVENT_MAGIC, + buffer=None, + padding0=0, + data=_MMAL_CLOCK_EVENT_DATA(enable=MMAL_FALSE), + padding1=0, + ) + +# mmal_parameters_common.h ################################################### + +MMAL_PARAMETER_GROUP_COMMON = (0<<16) +MMAL_PARAMETER_GROUP_CAMERA = (1<<16) +MMAL_PARAMETER_GROUP_VIDEO = (2<<16) +MMAL_PARAMETER_GROUP_AUDIO = (3<<16) +MMAL_PARAMETER_GROUP_CLOCK = (4<<16) +MMAL_PARAMETER_GROUP_MIRACAST = (5<<16) + +( + MMAL_PARAMETER_UNUSED, + MMAL_PARAMETER_SUPPORTED_ENCODINGS, + MMAL_PARAMETER_URI, + MMAL_PARAMETER_CHANGE_EVENT_REQUEST, + MMAL_PARAMETER_ZERO_COPY, + MMAL_PARAMETER_BUFFER_REQUIREMENTS, + MMAL_PARAMETER_STATISTICS, + MMAL_PARAMETER_CORE_STATISTICS, + MMAL_PARAMETER_MEM_USAGE, + MMAL_PARAMETER_BUFFER_FLAG_FILTER, + MMAL_PARAMETER_SEEK, + MMAL_PARAMETER_POWERMON_ENABLE, + MMAL_PARAMETER_LOGGING, + MMAL_PARAMETER_SYSTEM_TIME, + MMAL_PARAMETER_NO_IMAGE_PADDING, + MMAL_PARAMETER_LOCKSTEP_ENABLE, +) = range(MMAL_PARAMETER_GROUP_COMMON, MMAL_PARAMETER_GROUP_COMMON + 16) + +class MMAL_PARAMETER_HEADER_T(ct.Structure): + _fields_ = [ + ('id', ct.c_uint32), + ('size', ct.c_uint32), + ] + +class MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('change_id', ct.c_uint32), + ('enable', MMAL_BOOL_T), + ] + +class MMAL_PARAMETER_BUFFER_REQUIREMENTS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('buffer_num_min', ct.c_uint32), + ('buffer_size_min', ct.c_uint32), + ('buffer_alignment_min', ct.c_uint32), + ('buffer_num_recommended', ct.c_uint32), + ('buffer_size_recommended', ct.c_uint32), + ] + +class MMAL_PARAMETER_SEEK_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('offset', ct.c_int64), + ('flags', ct.c_uint32), + ] + +MMAL_PARAM_SEEK_FLAG_PRECISE = 0x01 +MMAL_PARAM_SEEK_FLAG_FORWARD = 0x02 + +class MMAL_PARAMETER_STATISTICS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('buffer_count', ct.c_uint32), + ('frame_count', ct.c_uint32), + ('frames_skipped', ct.c_uint32), + ('frames_discarded', ct.c_uint32), + ('eos_seen', ct.c_uint32), + ('maximum_frame_bytes', ct.c_uint32), + ('total_bytes', ct.c_int64), + ('corrupt_macroblocks', ct.c_uint32), + ] + +MMAL_CORE_STATS_DIR = ct.c_uint32 # enum 
+( + MMAL_CORE_STATS_RX, + MMAL_CORE_STATS_TX, +) = range(2) +MMAL_CORE_STATS_MAX = 0x7fffffff + +class MMAL_PARAMETER_CORE_STATISTICS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('dir', MMAL_CORE_STATS_DIR), + ('reset', MMAL_BOOL_T), + ('stats', MMAL_CORE_STATISTICS_T), + ] + +class MMAL_PARAMETER_MEM_USAGE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('pool_mem_alloc_size', ct.c_uint32), + ] + +class MMAL_PARAMETER_LOGGING_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('set', ct.c_uint32), + ('clear', ct.c_uint32), + ] + +# mmal_parameters_camera.h ################################################### + +( + MMAL_PARAMETER_THUMBNAIL_CONFIGURATION, + MMAL_PARAMETER_CAPTURE_QUALITY, + MMAL_PARAMETER_ROTATION, + MMAL_PARAMETER_EXIF_DISABLE, + MMAL_PARAMETER_EXIF, + MMAL_PARAMETER_AWB_MODE, + MMAL_PARAMETER_IMAGE_EFFECT, + MMAL_PARAMETER_COLOUR_EFFECT, + MMAL_PARAMETER_FLICKER_AVOID, + MMAL_PARAMETER_FLASH, + MMAL_PARAMETER_REDEYE, + MMAL_PARAMETER_FOCUS, + MMAL_PARAMETER_FOCAL_LENGTHS, + MMAL_PARAMETER_EXPOSURE_COMP, + MMAL_PARAMETER_ZOOM, + MMAL_PARAMETER_MIRROR, + MMAL_PARAMETER_CAMERA_NUM, + MMAL_PARAMETER_CAPTURE, + MMAL_PARAMETER_EXPOSURE_MODE, + MMAL_PARAMETER_EXP_METERING_MODE, + MMAL_PARAMETER_FOCUS_STATUS, + MMAL_PARAMETER_CAMERA_CONFIG, + MMAL_PARAMETER_CAPTURE_STATUS, + MMAL_PARAMETER_FACE_TRACK, + MMAL_PARAMETER_DRAW_BOX_FACES_AND_FOCUS, + MMAL_PARAMETER_JPEG_Q_FACTOR, + MMAL_PARAMETER_FRAME_RATE, + MMAL_PARAMETER_USE_STC, + MMAL_PARAMETER_CAMERA_INFO, + MMAL_PARAMETER_VIDEO_STABILISATION, + MMAL_PARAMETER_FACE_TRACK_RESULTS, + MMAL_PARAMETER_ENABLE_RAW_CAPTURE, + MMAL_PARAMETER_DPF_FILE, + MMAL_PARAMETER_ENABLE_DPF_FILE, + MMAL_PARAMETER_DPF_FAIL_IS_FATAL, + MMAL_PARAMETER_CAPTURE_MODE, + MMAL_PARAMETER_FOCUS_REGIONS, + MMAL_PARAMETER_INPUT_CROP, + MMAL_PARAMETER_SENSOR_INFORMATION, + MMAL_PARAMETER_FLASH_SELECT, + MMAL_PARAMETER_FIELD_OF_VIEW, + MMAL_PARAMETER_HIGH_DYNAMIC_RANGE, + MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION, + MMAL_PARAMETER_ALGORITHM_CONTROL, + MMAL_PARAMETER_SHARPNESS, + MMAL_PARAMETER_CONTRAST, + MMAL_PARAMETER_BRIGHTNESS, + MMAL_PARAMETER_SATURATION, + MMAL_PARAMETER_ISO, + MMAL_PARAMETER_ANTISHAKE, + MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS, + MMAL_PARAMETER_CAMERA_BURST_CAPTURE, + MMAL_PARAMETER_CAMERA_MIN_ISO, + MMAL_PARAMETER_CAMERA_USE_CASE, + MMAL_PARAMETER_CAPTURE_STATS_PASS, + MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG, + MMAL_PARAMETER_ENABLE_REGISTER_FILE, + MMAL_PARAMETER_REGISTER_FAIL_IS_FATAL, + MMAL_PARAMETER_CONFIGFILE_REGISTERS, + MMAL_PARAMETER_CONFIGFILE_CHUNK_REGISTERS, + MMAL_PARAMETER_JPEG_ATTACH_LOG, + MMAL_PARAMETER_ZERO_SHUTTER_LAG, + MMAL_PARAMETER_FPS_RANGE, + MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP, + MMAL_PARAMETER_SW_SHARPEN_DISABLE, + MMAL_PARAMETER_FLASH_REQUIRED, + MMAL_PARAMETER_SW_SATURATION_DISABLE, + MMAL_PARAMETER_SHUTTER_SPEED, + MMAL_PARAMETER_CUSTOM_AWB_GAINS, + MMAL_PARAMETER_CAMERA_SETTINGS, + MMAL_PARAMETER_PRIVACY_INDICATOR, + MMAL_PARAMETER_VIDEO_DENOISE, + MMAL_PARAMETER_STILLS_DENOISE, + MMAL_PARAMETER_ANNOTATE, + MMAL_PARAMETER_STEREOSCOPIC_MODE, + MMAL_PARAMETER_CAMERA_INTERFACE, + MMAL_PARAMETER_CAMERA_CLOCKING_MODE, + MMAL_PARAMETER_CAMERA_RX_CONFIG, + MMAL_PARAMETER_CAMERA_RX_TIMING, + MMAL_PARAMETER_DPF_CONFIG, + MMAL_PARAMETER_JPEG_RESTART_INTERVAL, + MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE, +) = range(MMAL_PARAMETER_GROUP_CAMERA, MMAL_PARAMETER_GROUP_CAMERA + 82) + +class MMAL_PARAMETER_THUMBNAIL_CONFIG_T(ct.Structure): + _fields_ = [ + 
('hdr', MMAL_PARAMETER_HEADER_T), + ('enable', ct.c_uint32), + ('width', ct.c_uint32), + ('height', ct.c_uint32), + ('quality', ct.c_uint32), + ] + +class MMAL_PARAMETER_EXIF_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('keylen', ct.c_uint32), + ('value_offset', ct.c_uint32), + ('valuelen', ct.c_uint32), + ('data', ct.c_uint8 * 1), + ] + +MMAL_PARAM_EXPOSUREMODE_T = ct.c_uint32 # enum +( + MMAL_PARAM_EXPOSUREMODE_OFF, + MMAL_PARAM_EXPOSUREMODE_AUTO, + MMAL_PARAM_EXPOSUREMODE_NIGHT, + MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW, + MMAL_PARAM_EXPOSUREMODE_BACKLIGHT, + MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT, + MMAL_PARAM_EXPOSUREMODE_SPORTS, + MMAL_PARAM_EXPOSUREMODE_SNOW, + MMAL_PARAM_EXPOSUREMODE_BEACH, + MMAL_PARAM_EXPOSUREMODE_VERYLONG, + MMAL_PARAM_EXPOSUREMODE_FIXEDFPS, + MMAL_PARAM_EXPOSUREMODE_ANTISHAKE, + MMAL_PARAM_EXPOSUREMODE_FIREWORKS, +) = range(13) +MMAL_PARAM_EXPOSUREMODE_MAX = 0x7fffffff + +class MMAL_PARAMETER_EXPOSUREMODE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_EXPOSUREMODE_T), + ] + +MMAL_PARAM_EXPOSUREMETERINGMODE_T = ct.c_uint32 # enum +( + MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE, + MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT, + MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT, + MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX, +) = range(4) +MMAL_PARAM_EXPOSUREMETERINGMODE_MAX = 0x7fffffff + +class MMAL_PARAMETER_EXPOSUREMETERINGMODE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_EXPOSUREMETERINGMODE_T), + ] + +MMAL_PARAM_AWBMODE_T = ct.c_uint32 # enum +( + MMAL_PARAM_AWBMODE_OFF, + MMAL_PARAM_AWBMODE_AUTO, + MMAL_PARAM_AWBMODE_SUNLIGHT, + MMAL_PARAM_AWBMODE_CLOUDY, + MMAL_PARAM_AWBMODE_SHADE, + MMAL_PARAM_AWBMODE_TUNGSTEN, + MMAL_PARAM_AWBMODE_FLUORESCENT, + MMAL_PARAM_AWBMODE_INCANDESCENT, + MMAL_PARAM_AWBMODE_FLASH, + MMAL_PARAM_AWBMODE_HORIZON, + MMAL_PARAM_AWBMODE_GREYWORLD, +) = range(11) +MMAL_PARAM_AWBMODE_MAX = 0x7fffffff + +class MMAL_PARAMETER_AWBMODE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_AWBMODE_T), + ] + +MMAL_PARAM_IMAGEFX_T = ct.c_uint32 # enum +( + MMAL_PARAM_IMAGEFX_NONE, + MMAL_PARAM_IMAGEFX_NEGATIVE, + MMAL_PARAM_IMAGEFX_SOLARIZE, + MMAL_PARAM_IMAGEFX_POSTERIZE, + MMAL_PARAM_IMAGEFX_WHITEBOARD, + MMAL_PARAM_IMAGEFX_BLACKBOARD, + MMAL_PARAM_IMAGEFX_SKETCH, + MMAL_PARAM_IMAGEFX_DENOISE, + MMAL_PARAM_IMAGEFX_EMBOSS, + MMAL_PARAM_IMAGEFX_OILPAINT, + MMAL_PARAM_IMAGEFX_HATCH, + MMAL_PARAM_IMAGEFX_GPEN, + MMAL_PARAM_IMAGEFX_PASTEL, + MMAL_PARAM_IMAGEFX_WATERCOLOUR, + MMAL_PARAM_IMAGEFX_FILM, + MMAL_PARAM_IMAGEFX_BLUR, + MMAL_PARAM_IMAGEFX_SATURATION, + MMAL_PARAM_IMAGEFX_COLOURSWAP, + MMAL_PARAM_IMAGEFX_WASHEDOUT, + MMAL_PARAM_IMAGEFX_POSTERISE, + MMAL_PARAM_IMAGEFX_COLOURPOINT, + MMAL_PARAM_IMAGEFX_COLOURBALANCE, + MMAL_PARAM_IMAGEFX_CARTOON, + MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE, + MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV, + MMAL_PARAM_IMAGEFX_DEINTERLACE_FAST, +) = range(26) +MMAL_PARAM_IMAGEFX_MAX = 0x7fffffff + +class MMAL_PARAMETER_IMAGEFX_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_IMAGEFX_T), + ] + +MMAL_MAX_IMAGEFX_PARAMETERS = 6 + +class MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('effect', MMAL_PARAM_IMAGEFX_T), + ('num_effect_params', ct.c_uint32), + ('effect_parameter', ct.c_uint32 * MMAL_MAX_IMAGEFX_PARAMETERS), + ] + +class MMAL_PARAMETER_COLOURFX_T(ct.Structure): + _fields_ = [ + ('hdr', 
MMAL_PARAMETER_HEADER_T), + ('enable', ct.c_int32), + ('u', ct.c_uint32), + ('v', ct.c_uint32), + ] + +MMAL_CAMERA_STC_MODE_T = ct.c_uint32 # enum +( + MMAL_PARAM_STC_MODE_OFF, + MMAL_PARAM_STC_MODE_RAW, + MMAL_PARAM_STC_MODE_COOKED, +) = range(3) +MMAL_PARAM_STC_MODE_MAX = 0x7fffffff + +class MMAL_PARAMETER_CAMERA_STC_MODE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_CAMERA_STC_MODE_T), + ] + +MMAL_PARAM_FLICKERAVOID_T = ct.c_uint32 # enum +( + MMAL_PARAM_FLICKERAVOID_OFF, + MMAL_PARAM_FLICKERAVOID_AUTO, + MMAL_PARAM_FLICKERAVOID_50HZ, + MMAL_PARAM_FLICKERAVOID_60HZ, +) = range(4) +MMAL_PARAM_FLICKERAVOID_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_FLICKERAVOID_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_FLICKERAVOID_T), + ] + +MMAL_PARAM_FLASH_T = ct.c_uint32 # enum +( + MMAL_PARAM_FLASH_OFF, + MMAL_PARAM_FLASH_AUTO, + MMAL_PARAM_FLASH_ON, + MMAL_PARAM_FLASH_REDEYE, + MMAL_PARAM_FLASH_FILLIN, + MMAL_PARAM_FLASH_TORCH, +) = range(6) +MMAL_PARAM_FLASH_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_FLASH_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_FLASH_T), + ] + +MMAL_PARAM_REDEYE_T = ct.c_uint32 # enum +( + MMAL_PARAM_REDEYE_OFF, + MMAL_PARAM_REDEYE_ON, + MMAL_PARAM_REDEYE_SIMPLE, +) = range(3) +MMAL_PARAM_REDEYE_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_REDEYE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_REDEYE_T), + ] + +MMAL_PARAM_FOCUS_T = ct.c_uint32 # enum +( + MMAL_PARAM_FOCUS_AUTO, + MMAL_PARAM_FOCUS_AUTO_NEAR, + MMAL_PARAM_FOCUS_AUTO_MACRO, + MMAL_PARAM_FOCUS_CAF, + MMAL_PARAM_FOCUS_CAF_NEAR, + MMAL_PARAM_FOCUS_FIXED_INFINITY, + MMAL_PARAM_FOCUS_FIXED_HYPERFOCAL, + MMAL_PARAM_FOCUS_FIXED_NEAR, + MMAL_PARAM_FOCUS_FIXED_MACRO, + MMAL_PARAM_FOCUS_EDOF, + MMAL_PARAM_FOCUS_CAF_MACRO, + MMAL_PARAM_FOCUS_CAF_FAST, + MMAL_PARAM_FOCUS_CAF_NEAR_FAST, + MMAL_PARAM_FOCUS_CAF_MACRO_FAST, + MMAL_PARAM_FOCUS_FIXED_CURRENT, +) = range(15) +MMAL_PARAM_FOCUS_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_FOCUS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_FOCUS_T), + ] + +MMAL_PARAM_CAPTURE_STATUS_T = ct.c_uint32 # enum +( + MMAL_PARAM_CAPTURE_STATUS_NOT_CAPTURING, + MMAL_PARAM_CAPTURE_STATUS_CAPTURE_STARTED, + MMAL_PARAM_CAPTURE_STATUS_CAPTURE_ENDED, +) = range(3) +MMAL_PARAM_CAPTURE_STATUS_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_CAPTURE_STATUS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('status', MMAL_PARAM_CAPTURE_STATUS_T), + ] + +MMAL_PARAM_FOCUS_STATUS_T = ct.c_uint32 # enum +( + MMAL_PARAM_FOCUS_STATUS_OFF, + MMAL_PARAM_FOCUS_STATUS_REQUEST, + MMAL_PARAM_FOCUS_STATUS_REACHED, + MMAL_PARAM_FOCUS_STATUS_UNABLE_TO_REACH, + MMAL_PARAM_FOCUS_STATUS_LOST, + MMAL_PARAM_FOCUS_STATUS_CAF_MOVING, + MMAL_PARAM_FOCUS_STATUS_CAF_SUCCESS, + MMAL_PARAM_FOCUS_STATUS_CAF_FAILED, + MMAL_PARAM_FOCUS_STATUS_MANUAL_MOVING, + MMAL_PARAM_FOCUS_STATUS_MANUAL_REACHED, + MMAL_PARAM_FOCUS_STATUS_CAF_WATCHING, + MMAL_PARAM_FOCUS_STATUS_CAF_SCENE_CHANGED, +) = range(12) +MMAL_PARAM_FOCUS_STATUS_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_FOCUS_STATUS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('status', MMAL_PARAM_FOCUS_STATUS_T), + ] + +MMAL_PARAM_FACE_TRACK_MODE_T = ct.c_uint32 # enum +( + MMAL_PARAM_FACE_DETECT_NONE, + MMAL_PARAM_FACE_DETECT_ON, +) = range(2) +MMAL_PARAM_FACE_DETECT_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_FACE_TRACK_T(ct.Structure): + 
_fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('mode', MMAL_PARAM_FACE_TRACK_MODE_T), + ('maxRegions', ct.c_uint32), + ('frames', ct.c_uint32), + ('quality', ct.c_uint32), + ] + +class MMAL_PARAMETER_FACE_TRACK_FACE_T(ct.Structure): + _fields_ = [ + ('face_id', ct.c_int32), + ('score', ct.c_int32), + ('face_rect', MMAL_RECT_T), + ('eye_rect', MMAL_RECT_T * 2), + ('mouth_rect', MMAL_RECT_T), + ] + +class MMAL_PARAMETER_FACE_TRACK_RESULTS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('num_faces', ct.c_uint32), + ('frame_width', ct.c_uint32), + ('frame_height', ct.c_uint32), + ('faces', MMAL_PARAMETER_FACE_TRACK_FACE_T * 1), + ] + +MMAL_PARAMETER_CAMERA_CONFIG_TIMESTAMP_MODE_T = ct.c_uint32 # enum +( + MMAL_PARAM_TIMESTAMP_MODE_ZERO, + MMAL_PARAM_TIMESTAMP_MODE_RAW_STC, + MMAL_PARAM_TIMESTAMP_MODE_RESET_STC, +) = range(3) +MMAL_PARAM_TIMESTAMP_MODE_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_CAMERA_CONFIG_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('max_stills_w', ct.c_uint32), + ('max_stills_h', ct.c_uint32), + ('stills_yuv422', ct.c_uint32), + ('one_shot_stills', ct.c_uint32), + ('max_preview_video_w', ct.c_uint32), + ('max_preview_video_h', ct.c_uint32), + ('num_preview_video_frames', ct.c_uint32), + ('stills_capture_circular_buffer_height', ct.c_uint32), + ('fast_preview_resume', ct.c_uint32), + ('use_stc_timestamp', MMAL_PARAMETER_CAMERA_CONFIG_TIMESTAMP_MODE_T), + ] + +MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS = 4 +MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES = 2 +MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN = 16 + +class MMAL_PARAMETER_CAMERA_INFO_CAMERA_T(ct.Structure): + _fields_ = [ + ('port_id', ct.c_uint32), + ('max_width', ct.c_uint32), + ('max_height', ct.c_uint32), + ('lens_present', MMAL_BOOL_T), + ] + +class MMAL_PARAMETER_CAMERA_INFO_CAMERA_V2_T(ct.Structure): + _fields_ = [ + ('port_id', ct.c_uint32), + ('max_width', ct.c_uint32), + ('max_height', ct.c_uint32), + ('lens_present', MMAL_BOOL_T), + ('camera_name', ct.c_char * MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN), + ] + +MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_T = ct.c_uint32 # enum +MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_XENON = 0 +MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_LED = 1 +MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_OTHER = 2 +MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_MAX = 0x7FFFFFFF + +class MMAL_PARAMETER_CAMERA_INFO_FLASH_T(ct.Structure): + _fields_ = [ + ('flash_type', MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_T), + ] + +class MMAL_PARAMETER_CAMERA_INFO_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('num_cameras', ct.c_uint32), + ('num_flashes', ct.c_uint32), + ('cameras', MMAL_PARAMETER_CAMERA_INFO_CAMERA_T * MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS), + ('flashes', MMAL_PARAMETER_CAMERA_INFO_FLASH_T * MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES), + ] + +class MMAL_PARAMETER_CAMERA_INFO_V2_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('num_cameras', ct.c_uint32), + ('num_flashes', ct.c_uint32), + ('cameras', MMAL_PARAMETER_CAMERA_INFO_CAMERA_V2_T * MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS), + ('flashes', MMAL_PARAMETER_CAMERA_INFO_FLASH_T * MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES), + ] + +MMAL_PARAMETER_CAPTUREMODE_MODE_T = ct.c_uint32 # enum +( + MMAL_PARAM_CAPTUREMODE_WAIT_FOR_END, + MMAL_PARAM_CAPTUREMODE_WAIT_FOR_END_AND_HOLD, + MMAL_PARAM_CAPTUREMODE_RESUME_VF_IMMEDIATELY, +) = range(3) + +class MMAL_PARAMETER_CAPTUREMODE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('mode', MMAL_PARAMETER_CAPTUREMODE_MODE_T), + ] + 
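+# Editor's note (illustrative sketch only, not part of the upstream module):
+# each MMAL_PARAMETER_*_T structure above and below is handed to
+# mmal_port_parameter_set()/mmal_port_parameter_get() (declared further down)
+# with its embedded header carrying the parameter id and the total structure
+# size. Assuming a hypothetical `still_port`, selecting a capture mode might
+# look like:
+#
+#     param = MMAL_PARAMETER_CAPTUREMODE_T(
+#         hdr=MMAL_PARAMETER_HEADER_T(
+#             id=MMAL_PARAMETER_CAPTURE_MODE,
+#             size=ct.sizeof(MMAL_PARAMETER_CAPTUREMODE_T)),
+#         mode=MMAL_PARAM_CAPTUREMODE_WAIT_FOR_END)
+#     status = mmal_port_parameter_set(still_port, param.hdr)
+#     if status != MMAL_SUCCESS:
+#         raise RuntimeError('failed to set capture mode')
+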
+MMAL_PARAMETER_FOCUS_REGION_TYPE_T = ct.c_uint32 # enum +( + MMAL_PARAMETER_FOCUS_REGION_TYPE_NORMAL, + MMAL_PARAMETER_FOCUS_REGION_TYPE_FACE, + MMAL_PARAMETER_FOCUS_REGION_TYPE_MAX, +) = range(3) + +class MMAL_PARAMETER_FOCUS_REGION_T(ct.Structure): + _fields_ = [ + ('rect', MMAL_RECT_T), + ('weight', ct.c_uint32), + ('mask', ct.c_uint32), + ('type', MMAL_PARAMETER_FOCUS_REGION_TYPE_T), + ] + +class MMAL_PARAMETER_FOCUS_REGIONS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('num_regions', ct.c_uint32), + ('lock_to_faces', MMAL_BOOL_T), + ('regions', MMAL_PARAMETER_FOCUS_REGION_T * 1), + ] + +class MMAL_PARAMETER_INPUT_CROP_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('rect', MMAL_RECT_T), + ] + +class MMAL_PARAMETER_SENSOR_INFORMATION_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('f_number', MMAL_RATIONAL_T), + ('focal_length', MMAL_RATIONAL_T), + ('model_id', ct.c_uint32), + ('manufacturer_id', ct.c_uint32), + ('revision', ct.c_uint32), + ] + +class MMAL_PARAMETER_FLASH_SELECT_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('flash_type', MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_T), + ] + +class MMAL_PARAMETER_FIELD_OF_VIEW_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('fov_h', MMAL_RATIONAL_T), + ('fov_v', MMAL_RATIONAL_T), + ] + +MMAL_PARAMETER_DRC_STRENGTH_T = ct.c_uint32 # enum +( + MMAL_PARAMETER_DRC_STRENGTH_OFF, + MMAL_PARAMETER_DRC_STRENGTH_LOW, + MMAL_PARAMETER_DRC_STRENGTH_MEDIUM, + MMAL_PARAMETER_DRC_STRENGTH_HIGH, +) = range(4) +MMAL_PARAMETER_DRC_STRENGTH_MAX = 0x7fffffff + +class MMAL_PARAMETER_DRC_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('strength', MMAL_PARAMETER_DRC_STRENGTH_T), + ] + +MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_T = ct.c_uint32 # enum +( + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_FACETRACKING, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_REDEYE_REDUCTION, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_VIDEO_STABILISATION, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_WRITE_RAW, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_VIDEO_DENOISE, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_STILLS_DENOISE, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_TEMPORAL_DENOISE, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_ANTISHAKE, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_IMAGE_EFFECTS, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_DYNAMIC_RANGE_COMPRESSION, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_FACE_RECOGNITION, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_FACE_BEAUTIFICATION, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_SCENE_DETECTION, + MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_HIGH_DYNAMIC_RANGE, +) = range(14) +MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_MAX = 0x7fffffff + +class MMAL_PARAMETER_ALGORITHM_CONTROL_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('algorithm', MMAL_PARAMETER_ALGORITHM_CONTROL_ALGORITHMS_T), + ('enabled', MMAL_BOOL_T), + ] + +MMAL_PARAM_CAMERA_USE_CASE_T = ct.c_uint32 # enum +( + MMAL_PARAM_CAMERA_USE_CASE_UNKNOWN, + MMAL_PARAM_CAMERA_USE_CASE_STILLS_CAPTURE, + MMAL_PARAM_CAMERA_USE_CASE_VIDEO_CAPTURE, +) = range(3) +MMAL_PARAM_CAMERA_USE_CASE_MAX = 0x7fffffff + +class MMAL_PARAMETER_CAMERA_USE_CASE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('use_case', MMAL_PARAM_CAMERA_USE_CASE_T), + ] + +class MMAL_PARAMETER_FPS_RANGE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('fps_low', MMAL_RATIONAL_T), 
+ ('fps_high', MMAL_RATIONAL_T), + ] + +class MMAL_PARAMETER_ZEROSHUTTERLAG_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('zero_shutter_lag_mode', MMAL_BOOL_T), + ('concurrent_capture', MMAL_BOOL_T), + ] + +class MMAL_PARAMETER_AWB_GAINS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('r_gain', MMAL_RATIONAL_T), + ('b_gain', MMAL_RATIONAL_T), + ] + +class MMAL_PARAMETER_CAMERA_SETTINGS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('exposure', ct.c_uint32), + ('analog_gain', MMAL_RATIONAL_T), + ('digital_gain', MMAL_RATIONAL_T), + ('awb_red_gain', MMAL_RATIONAL_T), + ('awb_blue_gain', MMAL_RATIONAL_T), + ('focus_position', ct.c_uint32), + ] + +MMAL_PARAM_PRIVACY_INDICATOR_T = ct.c_uint32 # enum +( + MMAL_PARAMETER_PRIVACY_INDICATOR_OFF, + MMAL_PARAMETER_PRIVACY_INDICATOR_ON, + MMAL_PARAMETER_PRIVACY_INDICATOR_FORCE_ON, +) = range(3) +MMAL_PARAMETER_PRIVACY_INDICATOR_MAX = 0x7fffffff + +class MMAL_PARAMETER_PRIVACY_INDICATOR_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('mode', MMAL_PARAM_PRIVACY_INDICATOR_T), + ] + +MMAL_CAMERA_ANNOTATE_MAX_TEXT_LEN = 32 + +class MMAL_PARAMETER_CAMERA_ANNOTATE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('enable', MMAL_BOOL_T), + ('text', ct.c_char * MMAL_CAMERA_ANNOTATE_MAX_TEXT_LEN), + ('show_shutter', MMAL_BOOL_T), + ('show_analog_gain', MMAL_BOOL_T), + ('show_lens', MMAL_BOOL_T), + ('show_caf', MMAL_BOOL_T), + ('show_motion', MMAL_BOOL_T), + ] + +MMAL_CAMERA_ANNOTATE_MAX_TEXT_LEN_V2 = 256 + +class MMAL_PARAMETER_CAMERA_ANNOTATE_V2_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('enable', MMAL_BOOL_T), + ('show_shutter', MMAL_BOOL_T), + ('show_analog_gain', MMAL_BOOL_T), + ('show_lens', MMAL_BOOL_T), + ('show_caf', MMAL_BOOL_T), + ('show_motion', MMAL_BOOL_T), + ('show_frame_num', MMAL_BOOL_T), + ('black_text_background', MMAL_BOOL_T), + ('text', ct.c_char * MMAL_CAMERA_ANNOTATE_MAX_TEXT_LEN_V2), + ] + +MMAL_CAMERA_ANNOTATE_MAX_TEXT_LEN_V3 = 256 + +class MMAL_PARAMETER_CAMERA_ANNOTATE_V3_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('enable', MMAL_BOOL_T), + ('show_shutter', MMAL_BOOL_T), + ('show_analog_gain', MMAL_BOOL_T), + ('show_lens', MMAL_BOOL_T), + ('show_caf', MMAL_BOOL_T), + ('show_motion', MMAL_BOOL_T), + ('show_frame_num', MMAL_BOOL_T), + ('enable_text_background', MMAL_BOOL_T), + ('custom_background_color', MMAL_BOOL_T), + ('custom_background_Y', ct.c_uint8), + ('custom_background_U', ct.c_uint8), + ('custom_background_V', ct.c_uint8), + ('dummy1', ct.c_uint8), + ('custom_text_color', MMAL_BOOL_T), + ('custom_text_Y', ct.c_uint8), + ('custom_text_U', ct.c_uint8), + ('custom_text_V', ct.c_uint8), + ('text_size', ct.c_uint8), + ('text', ct.c_char * MMAL_CAMERA_ANNOTATE_MAX_TEXT_LEN_V3), + ] + +MMAL_STEREOSCOPIC_MODE_T = ct.c_uint32 # enum +( + MMAL_STEREOSCOPIC_MODE_NONE, + MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE, + MMAL_STEREOSCOPIC_MODE_BOTTOM, +) = range(3) +MMAL_STEREOSCOPIC_MODE_MAX = 0x7fffffff + +class MMAL_PARAMETER_STEREOSCOPIC_MODE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('mode', MMAL_STEREOSCOPIC_MODE_T), + ('decimate', MMAL_BOOL_T), + ('swap_eyes', MMAL_BOOL_T), + ] + +MMAL_CAMERA_INTERFACE_T = ct.c_uint32 # enum +( + MMAL_CAMERA_INTERFACE_CSI2, + MMAL_CAMERA_INTERFACE_CCP2, + MMAL_CAMERA_INTERFACE_CPI, +) = range(3) +MMAL_CAMERA_INTERFACE_MAX = 0x7fffffff + +class MMAL_PARAMETER_CAMERA_INTERFACE_T(ct.Structure): + _fields_ = [ + ('hdr', 
MMAL_PARAMETER_HEADER_T), + ('mode', MMAL_CAMERA_INTERFACE_T), + ] + +MMAL_CAMERA_CLOCKING_MODE_T = ct.c_uint32 # enum +( + MMAL_CAMERA_CLOCKING_MODE_STROBE, + MMAL_CAMERA_CLOCKING_MODE_CLOCK, +) = range(2) +MMAL_CAMERA_CLOCKING_MODE_MAX = 0x7fffffff + +class MMAL_PARAMETER_CAMERA_CLOCKING_MODE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('mode', MMAL_CAMERA_CLOCKING_MODE_T), + ] + +MMAL_CAMERA_RX_CONFIG_DECODE = ct.c_uint32 # enum +( + MMAL_CAMERA_RX_CONFIG_DECODE_NONE, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM8TO10, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM7TO10, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM6TO10, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM8TO12, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM7TO12, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM6TO12, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM10TO14, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM8TO14, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM12TO16, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM10TO16, + MMAL_CAMERA_RX_CONFIG_DECODE_DPCM8TO16, +) = range(12) +MMAL_CAMERA_RX_CONFIG_DECODE_MAX = 0x7fffffff + +MMAL_CAMERA_RX_CONFIG_ENCODE = ct.c_uint32 # enum +( + MMAL_CAMERA_RX_CONFIG_ENCODE_NONE, + MMAL_CAMERA_RX_CONFIG_ENCODE_DPCM10TO8, + MMAL_CAMERA_RX_CONFIG_ENCODE_DPCM12TO8, + MMAL_CAMERA_RX_CONFIG_ENCODE_DPCM14TO8, +) = range(4) +MMAL_CAMERA_RX_CONFIG_ENCODE_MAX = 0x7fffffff + +MMAL_CAMERA_RX_CONFIG_UNPACK = ct.c_uint32 # enum +( + MMAL_CAMERA_RX_CONFIG_UNPACK_NONE, + MMAL_CAMERA_RX_CONFIG_UNPACK_6, + MMAL_CAMERA_RX_CONFIG_UNPACK_7, + MMAL_CAMERA_RX_CONFIG_UNPACK_8, + MMAL_CAMERA_RX_CONFIG_UNPACK_10, + MMAL_CAMERA_RX_CONFIG_UNPACK_12, + MMAL_CAMERA_RX_CONFIG_UNPACK_14, + MMAL_CAMERA_RX_CONFIG_UNPACK_16, +) = range(8) +MMAL_CAMERA_RX_CONFIG_UNPACK_MAX = 0x7fffffff + +MMAL_CAMERA_RX_CONFIG_PACK = ct.c_uint32 # enum +( + MMAL_CAMERA_RX_CONFIG_PACK_NONE, + MMAL_CAMERA_RX_CONFIG_PACK_8, + MMAL_CAMERA_RX_CONFIG_PACK_10, + MMAL_CAMERA_RX_CONFIG_PACK_12, + MMAL_CAMERA_RX_CONFIG_PACK_14, + MMAL_CAMERA_RX_CONFIG_PACK_16, + MMAL_CAMERA_RX_CONFIG_PACK_RAW10, + MMAL_CAMERA_RX_CONFIG_PACK_RAW12, +) = range(8) +MMAL_CAMERA_RX_CONFIG_PACK_MAX = 0x7fffffff + +class MMAL_PARAMETER_CAMERA_RX_CONFIG_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('decode', MMAL_CAMERA_RX_CONFIG_DECODE), + ('encode', MMAL_CAMERA_RX_CONFIG_ENCODE), + ('unpack', MMAL_CAMERA_RX_CONFIG_UNPACK), + ('pack', MMAL_CAMERA_RX_CONFIG_PACK), + ('data_lanes', ct.c_uint32), + ('encode_block_length', ct.c_uint32), + ('embedded_data_lines', ct.c_uint32), + ('image_id', ct.c_uint32), + ] + +class MMAL_PARAMETER_CAMERA_RX_TIMING_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('timing1', ct.c_uint32), + ('timing2', ct.c_uint32), + ('timing3', ct.c_uint32), + ('timing4', ct.c_uint32), + ('timing5', ct.c_uint32), + ('term1', ct.c_uint32), + ('term2', ct.c_uint32), + ('cpi_timing1', ct.c_uint32), + ('cpi_timing2', ct.c_uint32), + ] + +# mmal_parameters_video.h #################################################### + +( + MMAL_PARAMETER_DISPLAYREGION, + MMAL_PARAMETER_SUPPORTED_PROFILES, + MMAL_PARAMETER_PROFILE, + MMAL_PARAMETER_INTRAPERIOD, + MMAL_PARAMETER_RATECONTROL, + MMAL_PARAMETER_NALUNITFORMAT, + MMAL_PARAMETER_MINIMISE_FRAGMENTATION, + MMAL_PARAMETER_MB_ROWS_PER_SLICE, + MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION, + MMAL_PARAMETER_VIDEO_EEDE_ENABLE, + MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE, + MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME, + MMAL_PARAMETER_VIDEO_INTRA_REFRESH, + MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT, + MMAL_PARAMETER_VIDEO_BIT_RATE, + MMAL_PARAMETER_VIDEO_FRAME_RATE, + 
MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT, + MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT, + MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL, + MMAL_PARAMETER_EXTRA_BUFFERS, + MMAL_PARAMETER_VIDEO_ALIGN_HORIZ, + MMAL_PARAMETER_VIDEO_ALIGN_VERT, + MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES, + MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT, + MMAL_PARAMETER_VIDEO_ENCODE_QP_P, + MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT, + MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS, + MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE, + MMAL_PARAMETER_VIDEO_ENCODE_H264_DISABLE_CABAC, + MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_LATENCY, + MMAL_PARAMETER_VIDEO_ENCODE_H264_AU_DELIMITERS, + MMAL_PARAMETER_VIDEO_ENCODE_H264_DEBLOCK_IDC, + MMAL_PARAMETER_VIDEO_ENCODE_H264_MB_INTRA_MODE, + MMAL_PARAMETER_VIDEO_ENCODE_HEADER_ON_OPEN, + MMAL_PARAMETER_VIDEO_ENCODE_PRECODE_FOR_QP, + MMAL_PARAMETER_VIDEO_DRM_INIT_INFO, + MMAL_PARAMETER_VIDEO_TIMESTAMP_FIFO, + MMAL_PARAMETER_VIDEO_DECODE_ERROR_CONCEALMENT, + MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER, + MMAL_PARAMETER_VIDEO_DECODE_CONFIG_VD3, + MMAL_PARAMETER_VIDEO_ENCODE_H264_VCL_HRD_PARAMETERS, + MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_DELAY_HRD_FLAG, + MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER, + MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE, + MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS, + MMAL_PARAMETER_VIDEO_RENDER_STATS, + MMAL_PARAMETER_VIDEO_INTERLACE_TYPE, + MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS, + MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING, + MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS, +) = range(MMAL_PARAMETER_GROUP_VIDEO, MMAL_PARAMETER_GROUP_VIDEO + 50) + +MMAL_DISPLAYTRANSFORM_T = ct.c_uint32 # enum +MMAL_DISPLAY_ROT0 = 0 +MMAL_DISPLAY_MIRROR_ROT0 = 1 +MMAL_DISPLAY_MIRROR_ROT180 = 2 +MMAL_DISPLAY_ROT180 = 3 +MMAL_DISPLAY_MIRROR_ROT90 = 4 +MMAL_DISPLAY_ROT270 = 5 +MMAL_DISPLAY_ROT90 = 6 +MMAL_DISPLAY_MIRROR_ROT270 = 7 +MMAL_DISPLAY_DUMMY = 0x7FFFFFFF + +MMAL_DISPLAYMODE_T = ct.c_uint32 # enum +MMAL_DISPLAY_MODE_FILL = 0 +MMAL_DISPLAY_MODE_LETTERBOX = 1 +MMAL_DISPLAY_MODE_DUMMY = 0x7FFFFFFF + +MMAL_DISPLAYSET_T = ct.c_uint32 # enum +MMAL_DISPLAY_SET_NONE = 0 +MMAL_DISPLAY_SET_NUM = 1 +MMAL_DISPLAY_SET_FULLSCREEN = 2 +MMAL_DISPLAY_SET_TRANSFORM = 4 +MMAL_DISPLAY_SET_DEST_RECT = 8 +MMAL_DISPLAY_SET_SRC_RECT = 0x10 +MMAL_DISPLAY_SET_MODE = 0x20 +MMAL_DISPLAY_SET_PIXEL = 0x40 +MMAL_DISPLAY_SET_NOASPECT = 0x80 +MMAL_DISPLAY_SET_LAYER = 0x100 +MMAL_DISPLAY_SET_COPYPROTECT = 0x200 +MMAL_DISPLAY_SET_ALPHA = 0x400 +MMAL_DISPLAY_SET_DUMMY = 0x7FFFFFFF + +class MMAL_DISPLAYREGION_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('set', ct.c_uint32), + ('display_num', ct.c_uint32), + ('fullscreen', MMAL_BOOL_T), + ('transform', MMAL_DISPLAYTRANSFORM_T), + ('dest_rect', MMAL_RECT_T), + ('src_rect', MMAL_RECT_T), + ('noaspect', MMAL_BOOL_T), + ('mode', MMAL_DISPLAYMODE_T), + ('pixel_x', ct.c_uint32), + ('pixel_y', ct.c_uint32), + ('layer', ct.c_int32), + ('copyprotect_required', MMAL_BOOL_T), + ('alpha', ct.c_uint32), + ] + +MMAL_VIDEO_PROFILE_T = ct.c_uint32 # enum +( + MMAL_VIDEO_PROFILE_H263_BASELINE, + MMAL_VIDEO_PROFILE_H263_H320CODING, + MMAL_VIDEO_PROFILE_H263_BACKWARDCOMPATIBLE, + MMAL_VIDEO_PROFILE_H263_ISWV2, + MMAL_VIDEO_PROFILE_H263_ISWV3, + MMAL_VIDEO_PROFILE_H263_HIGHCOMPRESSION, + MMAL_VIDEO_PROFILE_H263_INTERNET, + MMAL_VIDEO_PROFILE_H263_INTERLACE, + MMAL_VIDEO_PROFILE_H263_HIGHLATENCY, + MMAL_VIDEO_PROFILE_MP4V_SIMPLE, + MMAL_VIDEO_PROFILE_MP4V_SIMPLESCALABLE, + MMAL_VIDEO_PROFILE_MP4V_CORE, + MMAL_VIDEO_PROFILE_MP4V_MAIN, + MMAL_VIDEO_PROFILE_MP4V_NBIT, + 
MMAL_VIDEO_PROFILE_MP4V_SCALABLETEXTURE, + MMAL_VIDEO_PROFILE_MP4V_SIMPLEFACE, + MMAL_VIDEO_PROFILE_MP4V_SIMPLEFBA, + MMAL_VIDEO_PROFILE_MP4V_BASICANIMATED, + MMAL_VIDEO_PROFILE_MP4V_HYBRID, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDREALTIME, + MMAL_VIDEO_PROFILE_MP4V_CORESCALABLE, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCODING, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCORE, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSCALABLE, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSIMPLE, + MMAL_VIDEO_PROFILE_H264_BASELINE, + MMAL_VIDEO_PROFILE_H264_MAIN, + MMAL_VIDEO_PROFILE_H264_EXTENDED, + MMAL_VIDEO_PROFILE_H264_HIGH, + MMAL_VIDEO_PROFILE_H264_HIGH10, + MMAL_VIDEO_PROFILE_H264_HIGH422, + MMAL_VIDEO_PROFILE_H264_HIGH444, + MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE, +) = range(33) +MMAL_VIDEO_PROFILE_DUMMY = 0x7FFFFFFF + +MMAL_VIDEO_LEVEL_T = ct.c_uint32 # enum +( + MMAL_VIDEO_LEVEL_H263_10, + MMAL_VIDEO_LEVEL_H263_20, + MMAL_VIDEO_LEVEL_H263_30, + MMAL_VIDEO_LEVEL_H263_40, + MMAL_VIDEO_LEVEL_H263_45, + MMAL_VIDEO_LEVEL_H263_50, + MMAL_VIDEO_LEVEL_H263_60, + MMAL_VIDEO_LEVEL_H263_70, + MMAL_VIDEO_LEVEL_MP4V_0, + MMAL_VIDEO_LEVEL_MP4V_0b, + MMAL_VIDEO_LEVEL_MP4V_1, + MMAL_VIDEO_LEVEL_MP4V_2, + MMAL_VIDEO_LEVEL_MP4V_3, + MMAL_VIDEO_LEVEL_MP4V_4, + MMAL_VIDEO_LEVEL_MP4V_4a, + MMAL_VIDEO_LEVEL_MP4V_5, + MMAL_VIDEO_LEVEL_MP4V_6, + MMAL_VIDEO_LEVEL_H264_1, + MMAL_VIDEO_LEVEL_H264_1b, + MMAL_VIDEO_LEVEL_H264_11, + MMAL_VIDEO_LEVEL_H264_12, + MMAL_VIDEO_LEVEL_H264_13, + MMAL_VIDEO_LEVEL_H264_2, + MMAL_VIDEO_LEVEL_H264_21, + MMAL_VIDEO_LEVEL_H264_22, + MMAL_VIDEO_LEVEL_H264_3, + MMAL_VIDEO_LEVEL_H264_31, + MMAL_VIDEO_LEVEL_H264_32, + MMAL_VIDEO_LEVEL_H264_4, + MMAL_VIDEO_LEVEL_H264_41, + MMAL_VIDEO_LEVEL_H264_42, + MMAL_VIDEO_LEVEL_H264_5, + MMAL_VIDEO_LEVEL_H264_51, +) = range(33) +MMAL_VIDEO_LEVEL_DUMMY = 0x7FFFFFFF + +class MMAL_PARAMETER_VIDEO_PROFILE_S(ct.Structure): + _fields_ = [ + ('profile', MMAL_VIDEO_PROFILE_T), + ('level', MMAL_VIDEO_LEVEL_T), + ] + +class MMAL_PARAMETER_VIDEO_PROFILE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('profile', MMAL_PARAMETER_VIDEO_PROFILE_S * 1), + ] + +MMAL_VIDEO_RATECONTROL_T = ct.c_uint32 # enum +( + MMAL_VIDEO_RATECONTROL_DEFAULT, + MMAL_VIDEO_RATECONTROL_VARIABLE, + MMAL_VIDEO_RATECONTROL_CONSTANT, + MMAL_VIDEO_RATECONTROL_VARIABLE_SKIP_FRAMES, + MMAL_VIDEO_RATECONTROL_CONSTANT_SKIP_FRAMES, +) = range(5) +MMAL_VIDEO_RATECONTROL_DUMMY = 0x7fffffff + +MMAL_VIDEO_INTRA_REFRESH_T = ct.c_uint32 +( + MMAL_VIDEO_INTRA_REFRESH_CYCLIC, + MMAL_VIDEO_INTRA_REFRESH_ADAPTIVE, + MMAL_VIDEO_INTRA_REFRESH_BOTH, +) = range(3) +MMAL_VIDEO_INTRA_REFRESH_KHRONOSEXTENSIONS = 0x6F000000 +MMAL_VIDEO_INTRA_REFRESH_VENDORSTARTUNUSED = 0x7F000000 +( + MMAL_VIDEO_INTRA_REFRESH_CYCLIC_MROWS, + MMAL_VIDEO_INTRA_REFRESH_PSEUDO_RAND, + MMAL_VIDEO_INTRA_REFRESH_MAX, +) = range(MMAL_VIDEO_INTRA_REFRESH_VENDORSTARTUNUSED, MMAL_VIDEO_INTRA_REFRESH_VENDORSTARTUNUSED + 3) +MMAL_VIDEO_INTRA_REFRESH_DUMMY = 0x7FFFFFFF + +MMAL_VIDEO_ENCODE_RC_MODEL_T = ct.c_uint32 +MMAL_VIDEO_ENCODER_RC_MODEL_DEFAULT = 0 +( + MMAL_VIDEO_ENCODER_RC_MODEL_JVT, + MMAL_VIDEO_ENCODER_RC_MODEL_VOWIFI, + MMAL_VIDEO_ENCODER_RC_MODEL_CBR, + MMAL_VIDEO_ENCODER_RC_MODEL_LAST, +) = range(MMAL_VIDEO_ENCODER_RC_MODEL_DEFAULT, MMAL_VIDEO_ENCODER_RC_MODEL_DEFAULT + 4) +MMAL_VIDEO_ENCODER_RC_MODEL_DUMMY = 0x7FFFFFFF + +class MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('rc_model', MMAL_VIDEO_ENCODE_RC_MODEL_T), + ] + +class MMAL_PARAMETER_VIDEO_RATECONTROL_T(ct.Structure): + 
_fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('control', MMAL_VIDEO_RATECONTROL_T), + ] + +MMAL_VIDEO_ENCODE_H264_MB_INTRA_MODES_T = ct.c_uint32 # enum +MMAL_VIDEO_ENCODER_H264_MB_4x4_INTRA = 1 +MMAL_VIDEO_ENCODER_H264_MB_8x8_INTRA = 2 +MMAL_VIDEO_ENCODER_H264_MB_16x16_INTRA = 4 +MMAL_VIDEO_ENCODER_H264_MB_INTRA_DUMMY = 0x7fffffff + +class MMAL_PARAMETER_VIDEO_ENCODER_H264_MB_INTRA_MODES_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('mb_mode', MMAL_VIDEO_ENCODE_H264_MB_INTRA_MODES_T), + ] + +MMAL_VIDEO_NALUNITFORMAT_T = ct.c_uint32 +MMAL_VIDEO_NALUNITFORMAT_STARTCODES = 1 +MMAL_VIDEO_NALUNITFORMAT_NALUNITPERBUFFER = 2 +MMAL_VIDEO_NALUNITFORMAT_ONEBYTEINTERLEAVELENGTH = 4 +MMAL_VIDEO_NALUNITFORMAT_TWOBYTEINTERLEAVELENGTH = 8 +MMAL_VIDEO_NALUNITFORMAT_FOURBYTEINTERLEAVELENGTH = 16 +MMAL_VIDEO_NALUNITFORMAT_DUMMY = 0x7fffffff + +class MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('format', MMAL_VIDEO_NALUNITFORMAT_T), + ] + +class MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('custom_max_mbps', ct.c_uint32), + ('custom_max_fs', ct.c_uint32), + ('custom_max_br_and_cpb', ct.c_uint32), + ] + +class MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('refresh_mode', MMAL_VIDEO_INTRA_REFRESH_T), + ('air_mbs', ct.c_uint32), + ('air_ref', ct.c_uint32), + ('cir_mbs', ct.c_uint32), + ('pir_mbs', ct.c_uint32), + ] + +class MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('enable', ct.c_uint32), + ] + +class MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('loss_rate', ct.c_uint32), + ] + +class MMAL_PARAMETER_VIDEO_DRM_INIT_INFO_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('current_time', ct.c_uint32), + ('ticks_per_sec', ct.c_uint32), + ('lhs', ct.c_uint8 * 32), + ] + +class MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('size_wanted', ct.c_uint32), + ('protect', ct.c_uint32), + ('mem_handle', ct.c_uint32), + ('phys_addr', ct.c_void_p), + ] + +class MMAL_PARAMETER_VIDEO_RENDER_STATS_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('valid', MMAL_BOOL_T), + ('match', ct.c_uint32), + ('period', ct.c_uint32), + ('phase', ct.c_uint32), + ('pixel_clock_nominal', ct.c_uint32), + ('pixel_clock', ct.c_uint32), + ('hvs_status', ct.c_uint32), + ('dummy', ct.c_uint32 * 2), + ] + +MMAL_INTERLACE_TYPE_T = ct.c_uint32 # enum +( + MMAL_InterlaceProgressive, + MMAL_InterlaceFieldSingleUpperFirst, + MMAL_InterlaceFieldSingleLowerFirst, + MMAL_InterlaceFieldsInterleavedUpperFirst, + MMAL_InterlaceFieldsInterleavedLowerFirst, + MMAL_InterlaceMixed, +) = range(6) +MMAL_InterlaceKhronosExtensions = 0x6F000000 +MMAL_InterlaceVendorStartUnused = 0x7F000000 +MMAL_InterlaceMax = 0x7FFFFFFF + +class MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('eMode', MMAL_INTERLACE_TYPE_T), + ('bRepeatFirstField', MMAL_BOOL_T), + ] + +# mmal_parameters_audio.h #################################################### + +( + MMAL_PARAMETER_AUDIO_DESTINATION, + MMAL_PARAMETER_AUDIO_LATENCY_TARGET, + MMAL_PARAMETER_AUDIO_SOURCE, + MMAL_PARAMETER_AUDIO_PASSTHROUGH, +) = range(MMAL_PARAMETER_GROUP_AUDIO, MMAL_PARAMETER_GROUP_AUDIO + 4) + +class 
MMAL_PARAMETER_AUDIO_LATENCY_TARGET_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('enable', MMAL_BOOL_T), + ('filter', ct.c_uint32), + ('target', ct.c_uint32), + ('shift', ct.c_uint32), + ('speed_factor', ct.c_int32), + ('inter_factor', ct.c_int32), + ('adj_cap', ct.c_int32), + ] + +# mmal_parameters_clock.h #################################################### + +( + MMAL_PARAMETER_CLOCK_REFERENCE, + MMAL_PARAMETER_CLOCK_ACTIVE, + MMAL_PARAMETER_CLOCK_SCALE, + MMAL_PARAMETER_CLOCK_TIME, + MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD, + MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD, + MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD, + MMAL_PARAMETER_CLOCK_ENABLE_BUFFER_INFO, + MMAL_PARAMETER_CLOCK_FRAME_RATE, + MMAL_PARAMETER_CLOCK_LATENCY, +) = range(MMAL_PARAMETER_GROUP_CLOCK, MMAL_PARAMETER_GROUP_CLOCK + 10) + +class MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_CLOCK_UPDATE_THRESHOLD_T), + ] + +class MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_CLOCK_DISCONT_THRESHOLD_T), + ] + +class MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_CLOCK_REQUEST_THRESHOLD_T), + ] + +class MMAL_PARAMETER_CLOCK_LATENCY_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_CLOCK_LATENCY_T), + ] + +# mmal_parameters.h ########################################################## + +class MMAL_PARAMETER_UINT64_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', ct.c_uint64), + ] + +class MMAL_PARAMETER_INT64_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', ct.c_int64), + ] + +class MMAL_PARAMETER_UINT32_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', ct.c_uint32), + ] + +class MMAL_PARAMETER_INT32_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', ct.c_int32), + ] + +class MMAL_PARAMETER_RATIONAL_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_RATIONAL_T), + ] + +class MMAL_PARAMETER_BOOLEAN_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('enable', MMAL_BOOL_T), + ] + +class MMAL_PARAMETER_STRING_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('str', ct.c_char_p), + ] + +class MMAL_PARAMETER_BYTES_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('data', ct.POINTER(ct.c_uint8)), + ] + +class MMAL_PARAMETER_SCALEFACTOR_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('scale_x', MMAL_FIXED_16_16_T), + ('scale_y', MMAL_FIXED_16_16_T), + ] + +MMAL_PARAM_MIRROR_T = ct.c_uint32 # enum +( + MMAL_PARAM_MIRROR_NONE, + MMAL_PARAM_MIRROR_VERTICAL, + MMAL_PARAM_MIRROR_HORIZONTAL, + MMAL_PARAM_MIRROR_BOTH, +) = range(4) + +class MMAL_PARAMETER_MIRROR_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('value', MMAL_PARAM_MIRROR_T), + ] + +class MMAL_PARAMETER_URI_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('uri', ct.c_char * 200), + ] + +class MMAL_PARAMETER_ENCODING_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('encoding', ct.c_uint32 * 30), + ] + +class MMAL_PARAMETER_FRAME_RATE_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('frame_rate', MMAL_RATIONAL_T), + ] + +class MMAL_PARAMETER_CONFIGFILE_T(ct.Structure): + _fields_ = [ + 
('hdr', MMAL_PARAMETER_HEADER_T), + ('file_size', ct.c_uint32), + ] + +class MMAL_PARAMETER_CONFIGFILE_CHUNK_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ('size', ct.c_uint32), + ('offset', ct.c_uint32), + ('data', ct.c_char_p), + ] + +# mmal_port.h ################################################################ + +MMAL_PORT_TYPE_T = ct.c_uint32 # enum +( + MMAL_PORT_TYPE_UNKNOWN, + MMAL_PORT_TYPE_CONTROL, + MMAL_PORT_TYPE_INPUT, + MMAL_PORT_TYPE_OUTPUT, + MMAL_PORT_TYPE_CLOCK, +) = range(5) +MMAL_PORT_TYPE_INVALID = 0xffffffff + +MMAL_PORT_CAPABILITY_PASSTHROUGH = 0x01 +MMAL_PORT_CAPABILITY_ALLOCATION = 0x02 +MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE = 0x04 + +class MMAL_PORT_PRIVATE_T(ct.Structure): + _fields_ = [] + +class MMAL_PORT_T(ct.Structure): + # NOTE Defined in mmal_component.h below after definition of MMAL_COMPONENT_T + pass + +mmal_port_format_commit = _lib.mmal_port_format_commit +mmal_port_format_commit.argtypes = [ct.POINTER(MMAL_PORT_T)] +mmal_port_format_commit.restype = MMAL_STATUS_T + +MMAL_PORT_BH_CB_T = ct.CFUNCTYPE( + None, + ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_BUFFER_HEADER_T)) + +mmal_port_enable = _lib.mmal_port_enable +mmal_port_enable.argtypes = [ct.POINTER(MMAL_PORT_T), MMAL_PORT_BH_CB_T] +mmal_port_enable.restype = MMAL_STATUS_T + +mmal_port_disable = _lib.mmal_port_disable +mmal_port_disable.argtypes = [ct.POINTER(MMAL_PORT_T)] +mmal_port_disable.restype = MMAL_STATUS_T + +mmal_port_flush = _lib.mmal_port_flush +mmal_port_flush.argtypes = [ct.POINTER(MMAL_PORT_T)] +mmal_port_flush.restype = MMAL_STATUS_T + +mmal_port_parameter_set = _lib.mmal_port_parameter_set +mmal_port_parameter_set.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_PARAMETER_HEADER_T)] +mmal_port_parameter_set.restype = MMAL_STATUS_T + +mmal_port_parameter_get = _lib.mmal_port_parameter_get +mmal_port_parameter_get.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_PARAMETER_HEADER_T)] +mmal_port_parameter_get.restype = MMAL_STATUS_T + +mmal_port_send_buffer = _lib.mmal_port_send_buffer +mmal_port_send_buffer.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_port_send_buffer.restype = MMAL_STATUS_T + +mmal_port_connect = _lib.mmal_port_connect +mmal_port_connect.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_PORT_T)] +mmal_port_connect.restype = MMAL_STATUS_T + +mmal_port_disconnect = _lib.mmal_port_disconnect +mmal_port_disconnect.argtypes = [ct.POINTER(MMAL_PORT_T)] +mmal_port_disconnect.restype = MMAL_STATUS_T + +mmal_port_payload_alloc = _lib.mmal_port_payload_alloc +mmal_port_payload_alloc.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32] +mmal_port_payload_alloc.restype = ct.POINTER(ct.c_uint8) + +mmal_port_payload_free = _lib.mmal_port_payload_free +mmal_port_payload_free.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(ct.c_uint8)] +mmal_port_payload_free.restype = None + +mmal_port_event_get = _lib.mmal_port_event_get +mmal_port_event_get.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(ct.POINTER(MMAL_BUFFER_HEADER_T)), ct.c_uint32] +mmal_port_event_get.restype = MMAL_STATUS_T + +# mmal_component.h ########################################################### + +class MMAL_COMPONENT_PRIVATE_T(ct.Structure): + _fields_ = [] + +class MMAL_COMPONENT_T(ct.Structure): + _fields_ = [ + ('priv', ct.POINTER(MMAL_COMPONENT_PRIVATE_T)), + ('userdata', ct.c_void_p), + ('name', ct.c_char_p), + ('is_enabled', ct.c_uint32), + ('control', ct.POINTER(MMAL_PORT_T)), + ('input_num', ct.c_uint32), + ('input', 
ct.POINTER(ct.POINTER(MMAL_PORT_T))), + ('output_num', ct.c_uint32), + ('output', ct.POINTER(ct.POINTER(MMAL_PORT_T))), + ('clock_num', ct.c_uint32), + ('clock', ct.POINTER(ct.POINTER(MMAL_PORT_T))), + ('port_num', ct.c_uint32), + ('port', ct.POINTER(ct.POINTER(MMAL_PORT_T))), + ('id', ct.c_uint32), + ] + +# NOTE MMAL_PORT_T's fields are declared here as they reference +# MMAL_COMPONENT_T which in turn references MMAL_PORT_T, hence the empty +# forward decl in mmal_port.h above + +MMAL_PORT_T._fields_ = [ + ('priv', ct.POINTER(MMAL_PORT_PRIVATE_T)), + ('name', ct.c_char_p), + ('type', MMAL_PORT_TYPE_T), + ('index', ct.c_uint16), + ('index_all', ct.c_uint16), + ('is_enabled', ct.c_uint32), + ('format', ct.POINTER(MMAL_ES_FORMAT_T)), + ('buffer_num_min', ct.c_uint32), + ('buffer_size_min', ct.c_uint32), + ('buffer_alignment_min', ct.c_uint32), + ('buffer_num_recommended', ct.c_uint32), + ('buffer_size_recommended', ct.c_uint32), + ('buffer_num', ct.c_uint32), + ('buffer_size', ct.c_uint32), + ('component', ct.POINTER(MMAL_COMPONENT_T)), + ('userdata', ct.c_void_p), + ('capabilities', ct.c_uint32), + ] + +mmal_component_create = _lib.mmal_component_create +mmal_component_create.argtypes = [ct.c_char_p, ct.POINTER(ct.POINTER(MMAL_COMPONENT_T))] +mmal_component_create.restype = MMAL_STATUS_T + +mmal_component_acquire = _lib.mmal_component_acquire +mmal_component_acquire.argtypes = [ct.POINTER(MMAL_COMPONENT_T)] +mmal_component_acquire.restype = None + +mmal_component_release = _lib.mmal_component_release +mmal_component_release.argtypes = [ct.POINTER(MMAL_COMPONENT_T)] +mmal_component_release.restype = MMAL_STATUS_T + +mmal_component_destroy = _lib.mmal_component_destroy +mmal_component_destroy.argtypes = [ct.POINTER(MMAL_COMPONENT_T)] +mmal_component_destroy.restype = MMAL_STATUS_T + +mmal_component_enable = _lib.mmal_component_enable +mmal_component_enable.argtypes = [ct.POINTER(MMAL_COMPONENT_T)] +mmal_component_enable.restype = MMAL_STATUS_T + +mmal_component_disable = _lib.mmal_component_disable +mmal_component_disable.argtypes = [ct.POINTER(MMAL_COMPONENT_T)] +mmal_component_disable.restype = MMAL_STATUS_T + +# mmal_metadata.h ############################################################ + +# XXX This does not appear to be in libmmal.so... 
+ +#MMAL_METADATA_HELLO_WORLD = MMAL_FOURCC('HELO') +# +#class MMAL_METADATA_T(ct.Structure): +# _fields_ = [ +# ('id', ct.c_uint32), +# ('size', ct.c_uint32), +# ] +# +#class MMAL_METADATA_HELLO_WORLD_T(ct.Structure): +# _fields_ = [ +# ('id', ct.c_uint32), +# ('size', ct.c_uint32), +# ('myvalue', ct.c_uint32), +# ] +# +#mmal_metadata_get = _lib.mmal_metadata_get +#mmal_metadata_get.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T), ct.c_uint32] +#mmal_metadata_get.restype = ct.POINTER(MMAL_METADATA_T) +# +#mmal_metadata_set = _lib.mmal_metadata_set +#mmal_metadata_set.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T), ct.POINTER(MMAL_METADATA_T)] +#mmal_metadata_set.restype = MMAL_STATUS_T + +# mmal_queue.h ############################################################### + +class MMAL_QUEUE_T(ct.Structure): + _fields_ = [] + +mmal_queue_create = _lib.mmal_queue_create +mmal_queue_create.argtypes = [] +mmal_queue_create.restype = ct.POINTER(MMAL_QUEUE_T) + +mmal_queue_put = _lib.mmal_queue_put +mmal_queue_put.argtypes = [ct.POINTER(MMAL_QUEUE_T), ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_queue_put.restype = None + +mmal_queue_put_back = _lib.mmal_queue_put_back +mmal_queue_put_back.argtypes = [ct.POINTER(MMAL_QUEUE_T), ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_queue_put_back.restype = None + +mmal_queue_get = _lib.mmal_queue_get +mmal_queue_get.argtypes = [ct.POINTER(MMAL_QUEUE_T)] +mmal_queue_get.restype = ct.POINTER(MMAL_BUFFER_HEADER_T) + +mmal_queue_wait = _lib.mmal_queue_wait +mmal_queue_wait.argtypes = [ct.POINTER(MMAL_QUEUE_T)] +mmal_queue_wait.restype = ct.POINTER(MMAL_BUFFER_HEADER_T) + +try: + mmal_queue_timedwait = _lib.mmal_queue_timedwait +except AttributeError: + # mmal_queue_timedwait doesn't exist in older firmwares. We don't use it + # anyway, so ignore it if we don't find it + pass +else: + mmal_queue_timedwait.argtypes = [ct.POINTER(MMAL_QUEUE_T), VCOS_UNSIGNED] + mmal_queue_timedwait.restype = ct.POINTER(MMAL_BUFFER_HEADER_T) + +mmal_queue_length = _lib.mmal_queue_length +mmal_queue_length.argtypes = [ct.POINTER(MMAL_QUEUE_T)] +mmal_queue_length.restype = ct.c_uint + +mmal_queue_destroy = _lib.mmal_queue_destroy +mmal_queue_destroy.argtypes = [ct.POINTER(MMAL_QUEUE_T)] +mmal_queue_destroy.restype = None + +# mmal_pool.h ################################################################ + +class MMAL_POOL_T(ct.Structure): + _fields_ = [ + ('queue', ct.POINTER(MMAL_QUEUE_T)), + ('headers_num', ct.c_uint32), + ('header', ct.POINTER(ct.POINTER(MMAL_BUFFER_HEADER_T))), + ] + +mmal_pool_allocator_alloc_t = ct.CFUNCTYPE( + None, + ct.c_void_p, ct.c_uint32) +mmal_pool_allocator_free_t = ct.CFUNCTYPE( + None, + ct.c_void_p, ct.c_void_p) + +mmal_pool_create = _lib.mmal_pool_create +mmal_pool_create.argtypes = [ct.c_uint, ct.c_uint32] +mmal_pool_create.restype = ct.POINTER(MMAL_POOL_T) + +mmal_pool_create_with_allocator = _lib.mmal_pool_create_with_allocator +mmal_pool_create_with_allocator.argtypes = [ + ct.c_uint, + ct.c_uint32, + ct.c_void_p, + mmal_pool_allocator_alloc_t, + mmal_pool_allocator_free_t, + ] +mmal_pool_create_with_allocator.restype = ct.POINTER(MMAL_POOL_T) + +mmal_pool_destroy = _lib.mmal_pool_destroy +mmal_pool_destroy.argtypes = [ct.POINTER(MMAL_POOL_T)] +mmal_pool_destroy.restype = None + +mmal_pool_resize = _lib.mmal_pool_resize +mmal_pool_resize.argtypes = [ct.POINTER(MMAL_POOL_T), ct.c_uint, ct.c_uint32] +mmal_pool_resize.restype = MMAL_STATUS_T + +MMAL_POOL_BH_CB_T = ct.CFUNCTYPE( + MMAL_BOOL_T, + ct.POINTER(MMAL_POOL_T), ct.POINTER(MMAL_BUFFER_HEADER_T), ct.c_void_p) + 
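+# NOTE The following commented-out snippet is purely illustrative and is not
+# part of the original MMAL headers: it sketches how the queue and pool
+# bindings above are typically combined. A pool owns a queue of buffer
+# headers; callers pull a header from that queue and hand it to a port with
+# mmal_port_send_buffer(). Here "port" is a placeholder for an
+# already-configured ct.POINTER(MMAL_PORT_T) value.
+#
+#   pool = mmal_pool_create(port[0].buffer_num, port[0].buffer_size)
+#   buf = mmal_queue_get(pool[0].queue)
+#   if buf:
+#       mmal_port_send_buffer(port, buf)
+#   ...
+#   mmal_pool_destroy(pool)
+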
+mmal_pool_callback_set = _lib.mmal_pool_callback_set +mmal_pool_callback_set.argtypes = [ct.POINTER(MMAL_POOL_T), MMAL_POOL_BH_CB_T] +mmal_pool_callback_set.restype = None + +mmal_pool_pre_release_callback_set = _lib.mmal_pool_pre_release_callback_set +mmal_pool_pre_release_callback_set.argtypes = [ct.POINTER(MMAL_POOL_T), MMAL_BH_PRE_RELEASE_CB_T, ct.c_void_p] +mmal_pool_pre_release_callback_set.restype = None + +# mmal_events.h ############################################################## + +MMAL_EVENT_ERROR = MMAL_FOURCC('ERRO') +MMAL_EVENT_EOS = MMAL_FOURCC('EEOS') +MMAL_EVENT_FORMAT_CHANGED = MMAL_FOURCC('EFCH') +MMAL_EVENT_PARAMETER_CHANGED = MMAL_FOURCC('EPCH') + +class MMAL_EVENT_END_OF_STREAM_T(ct.Structure): + _fields_ = [ + ('port_type', MMAL_PORT_TYPE_T), + ('port_index', ct.c_uint32), + ] + +class MMAL_EVENT_FORMAT_CHANGED_T(ct.Structure): + _fields_ = [ + ('buffer_size_min', ct.c_uint32), + ('buffer_num_min', ct.c_uint32), + ('buffer_size_recommended', ct.c_uint32), + ('buffer_num_recommended', ct.c_uint32), + ('format', ct.POINTER(MMAL_ES_FORMAT_T)), + ] + +class MMAL_EVENT_PARAMETER_CHANGED_T(ct.Structure): + _fields_ = [ + ('hdr', MMAL_PARAMETER_HEADER_T), + ] + +mmal_event_format_changed_get = _lib.mmal_event_format_changed_get +mmal_event_format_changed_get.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_event_format_changed_get.restype = ct.POINTER(MMAL_EVENT_FORMAT_CHANGED_T) + +# mmal_encodings.h ########################################################### + +MMAL_ENCODING_H264 = MMAL_FOURCC('H264') +MMAL_ENCODING_MVC = MMAL_FOURCC('MVC ') +MMAL_ENCODING_H263 = MMAL_FOURCC('H263') +MMAL_ENCODING_MP4V = MMAL_FOURCC('MP4V') +MMAL_ENCODING_MP2V = MMAL_FOURCC('MP2V') +MMAL_ENCODING_MP1V = MMAL_FOURCC('MP1V') +MMAL_ENCODING_WMV3 = MMAL_FOURCC('WMV3') +MMAL_ENCODING_WMV2 = MMAL_FOURCC('WMV2') +MMAL_ENCODING_WMV1 = MMAL_FOURCC('WMV1') +MMAL_ENCODING_WVC1 = MMAL_FOURCC('WVC1') +MMAL_ENCODING_VP8 = MMAL_FOURCC('VP8 ') +MMAL_ENCODING_VP7 = MMAL_FOURCC('VP7 ') +MMAL_ENCODING_VP6 = MMAL_FOURCC('VP6 ') +MMAL_ENCODING_THEORA = MMAL_FOURCC('THEO') +MMAL_ENCODING_SPARK = MMAL_FOURCC('SPRK') +MMAL_ENCODING_MJPEG = MMAL_FOURCC('MJPG') + +MMAL_ENCODING_JPEG = MMAL_FOURCC('JPEG') +MMAL_ENCODING_GIF = MMAL_FOURCC('GIF ') +MMAL_ENCODING_PNG = MMAL_FOURCC('PNG ') +MMAL_ENCODING_PPM = MMAL_FOURCC('PPM ') +MMAL_ENCODING_TGA = MMAL_FOURCC('TGA ') +MMAL_ENCODING_BMP = MMAL_FOURCC('BMP ') + +MMAL_ENCODING_I420 = MMAL_FOURCC('I420') +MMAL_ENCODING_I420_SLICE = MMAL_FOURCC('S420') +MMAL_ENCODING_YV12 = MMAL_FOURCC('YV12') +MMAL_ENCODING_I422 = MMAL_FOURCC('I422') +MMAL_ENCODING_I422_SLICE = MMAL_FOURCC('S422') +MMAL_ENCODING_YUYV = MMAL_FOURCC('YUYV') +MMAL_ENCODING_YVYU = MMAL_FOURCC('YVYU') +MMAL_ENCODING_UYVY = MMAL_FOURCC('UYVY') +MMAL_ENCODING_VYUY = MMAL_FOURCC('VYUY') +MMAL_ENCODING_NV12 = MMAL_FOURCC('NV12') +MMAL_ENCODING_NV21 = MMAL_FOURCC('NV21') +MMAL_ENCODING_ARGB = MMAL_FOURCC('ARGB') +MMAL_ENCODING_ARGB_SLICE = MMAL_FOURCC('argb') +MMAL_ENCODING_RGBA = MMAL_FOURCC('RGBA') +MMAL_ENCODING_RGBA_SLICE = MMAL_FOURCC('rgba') +MMAL_ENCODING_ABGR = MMAL_FOURCC('ABGR') +MMAL_ENCODING_ABGR_SLICE = MMAL_FOURCC('abgr') +MMAL_ENCODING_BGRA = MMAL_FOURCC('BGRA') +MMAL_ENCODING_BGRA_SLICE = MMAL_FOURCC('bgra') +MMAL_ENCODING_RGB16 = MMAL_FOURCC('RGB2') +MMAL_ENCODING_RGB16_SLICE = MMAL_FOURCC('rgb2') +MMAL_ENCODING_RGB24 = MMAL_FOURCC('RGB3') +MMAL_ENCODING_RGB24_SLICE = MMAL_FOURCC('rgb3') +MMAL_ENCODING_RGB32 = MMAL_FOURCC('RGB4') +MMAL_ENCODING_RGB32_SLICE = MMAL_FOURCC('rgb4') 
+MMAL_ENCODING_BGR16 = MMAL_FOURCC('BGR2') +MMAL_ENCODING_BGR16_SLICE = MMAL_FOURCC('bgr2') +MMAL_ENCODING_BGR24 = MMAL_FOURCC('BGR3') +MMAL_ENCODING_BGR24_SLICE = MMAL_FOURCC('bgr3') +MMAL_ENCODING_BGR32 = MMAL_FOURCC('BGR4') +MMAL_ENCODING_BGR32_SLICE = MMAL_FOURCC('bgr4') + +MMAL_ENCODING_BAYER_SBGGR10P = MMAL_FOURCC('pBAA') +MMAL_ENCODING_BAYER_SGRBG10P = MMAL_FOURCC('pgAA') +MMAL_ENCODING_BAYER_SGBRG10P = MMAL_FOURCC('pGAA') +MMAL_ENCODING_BAYER_SRGGB10P = MMAL_FOURCC('PRAA') +MMAL_ENCODING_BAYER_SBGGR8 = MMAL_FOURCC('BA81') +MMAL_ENCODING_BAYER_SGBRG8 = MMAL_FOURCC('GBRG') +MMAL_ENCODING_BAYER_SGRBG8 = MMAL_FOURCC('GRBG') +MMAL_ENCODING_BAYER_SRGGB8 = MMAL_FOURCC('RGGB') +MMAL_ENCODING_BAYER_SBGGR12P = MMAL_FOURCC('BY12') +MMAL_ENCODING_BAYER_SBGGR16 = MMAL_FOURCC('BYR2') +MMAL_ENCODING_BAYER_SBGGR10DPCM8 = MMAL_FOURCC('bBA8') + +MMAL_ENCODING_YUVUV128 = MMAL_FOURCC('SAND') +MMAL_ENCODING_OPAQUE = MMAL_FOURCC('OPQV') + +MMAL_ENCODING_EGL_IMAGE = MMAL_FOURCC('EGLI') +MMAL_ENCODING_PCM_UNSIGNED_BE = MMAL_FOURCC('PCMU') +MMAL_ENCODING_PCM_UNSIGNED_LE = MMAL_FOURCC('pcmu') +MMAL_ENCODING_PCM_SIGNED_BE = MMAL_FOURCC('PCMS') +MMAL_ENCODING_PCM_SIGNED_LE = MMAL_FOURCC('pcms') +MMAL_ENCODING_PCM_FLOAT_BE = MMAL_FOURCC('PCMF') +MMAL_ENCODING_PCM_FLOAT_LE = MMAL_FOURCC('pcmf') +MMAL_ENCODING_PCM_UNSIGNED = MMAL_ENCODING_PCM_UNSIGNED_LE +MMAL_ENCODING_PCM_SIGNED = MMAL_ENCODING_PCM_SIGNED_LE +MMAL_ENCODING_PCM_FLOAT = MMAL_ENCODING_PCM_FLOAT_LE + +MMAL_ENCODING_MP4A = MMAL_FOURCC('MP4A') +MMAL_ENCODING_MPGA = MMAL_FOURCC('MPGA') +MMAL_ENCODING_ALAW = MMAL_FOURCC('ALAW') +MMAL_ENCODING_MULAW = MMAL_FOURCC('ULAW') +MMAL_ENCODING_ADPCM_MS = MMAL_FOURCC('MS\x00\x02') +MMAL_ENCODING_ADPCM_IMA_MS = MMAL_FOURCC('MS\x00\x01') +MMAL_ENCODING_ADPCM_SWF = MMAL_FOURCC('ASWF') +MMAL_ENCODING_WMA1 = MMAL_FOURCC('WMA1') +MMAL_ENCODING_WMA2 = MMAL_FOURCC('WMA2') +MMAL_ENCODING_WMAP = MMAL_FOURCC('WMAP') +MMAL_ENCODING_WMAL = MMAL_FOURCC('WMAL') +MMAL_ENCODING_WMAV = MMAL_FOURCC('WMAV') +MMAL_ENCODING_AMRNB = MMAL_FOURCC('AMRN') +MMAL_ENCODING_AMRWB = MMAL_FOURCC('AMRW') +MMAL_ENCODING_AMRWBP = MMAL_FOURCC('AMRP') +MMAL_ENCODING_AC3 = MMAL_FOURCC('AC3 ') +MMAL_ENCODING_EAC3 = MMAL_FOURCC('EAC3') +MMAL_ENCODING_DTS = MMAL_FOURCC('DTS ') +MMAL_ENCODING_MLP = MMAL_FOURCC('MLP ') +MMAL_ENCODING_FLAC = MMAL_FOURCC('FLAC') +MMAL_ENCODING_VORBIS = MMAL_FOURCC('VORB') +MMAL_ENCODING_SPEEX = MMAL_FOURCC('SPX ') +MMAL_ENCODING_ATRAC3 = MMAL_FOURCC('ATR3') +MMAL_ENCODING_ATRACX = MMAL_FOURCC('ATRX') +MMAL_ENCODING_ATRACL = MMAL_FOURCC('ATRL') +MMAL_ENCODING_MIDI = MMAL_FOURCC('MIDI') +MMAL_ENCODING_EVRC = MMAL_FOURCC('EVRC') +MMAL_ENCODING_NELLYMOSER = MMAL_FOURCC('NELY') +MMAL_ENCODING_QCELP = MMAL_FOURCC('QCEL') +MMAL_ENCODING_MP4V_DIVX_DRM = MMAL_FOURCC('M4VD') + +MMAL_ENCODING_VARIANT_H264_DEFAULT = 0 +MMAL_ENCODING_VARIANT_H264_AVC1 = MMAL_FOURCC('AVC1') +MMAL_ENCODING_VARIANT_H264_RAW = MMAL_FOURCC('RAW ') +MMAL_ENCODING_VARIANT_MP4A_DEFAULT = 0 +MMAL_ENCODING_VARIANT_MP4A_ADTS = MMAL_FOURCC('ADTS') + +MMAL_COLOR_SPACE_UNKNOWN = 0 +MMAL_COLOR_SPACE_ITUR_BT601 = MMAL_FOURCC('Y601') +MMAL_COLOR_SPACE_ITUR_BT709 = MMAL_FOURCC('Y709') +MMAL_COLOR_SPACE_JPEG_JFIF = MMAL_FOURCC('YJFI') +MMAL_COLOR_SPACE_FCC = MMAL_FOURCC('YFCC') +MMAL_COLOR_SPACE_SMPTE240M = MMAL_FOURCC('Y240') +MMAL_COLOR_SPACE_BT470_2_M = MMAL_FOURCC('Y__M') +MMAL_COLOR_SPACE_BT470_2_BG = MMAL_FOURCC('Y_BG') +MMAL_COLOR_SPACE_JFIF_Y16_255 = MMAL_FOURCC('YY16') + +# util/mmal_default_components.h ############################################# + 
+MMAL_COMPONENT_DEFAULT_VIDEO_DECODER = b"vc.ril.video_decode" +MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER = b"vc.ril.video_encode" +MMAL_COMPONENT_DEFAULT_VIDEO_RENDERER = b"vc.ril.video_render" +MMAL_COMPONENT_DEFAULT_IMAGE_DECODER = b"vc.ril.image_decode" +MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER = b"vc.ril.image_encode" +MMAL_COMPONENT_DEFAULT_CAMERA = b"vc.ril.camera" +MMAL_COMPONENT_DEFAULT_VIDEO_CONVERTER = b"vc.video_convert" +MMAL_COMPONENT_DEFAULT_SPLITTER = b"vc.splitter" +MMAL_COMPONENT_DEFAULT_SCHEDULER = b"vc.scheduler" +MMAL_COMPONENT_DEFAULT_VIDEO_INJECTER = b"vc.video_inject" +MMAL_COMPONENT_DEFAULT_VIDEO_SPLITTER = b"vc.ril.video_splitter" +MMAL_COMPONENT_DEFAULT_AUDIO_DECODER = b"none" +MMAL_COMPONENT_DEFAULT_AUDIO_RENDERER = b"vc.ril.audio_render" +MMAL_COMPONENT_DEFAULT_MIRACAST = b"vc.miracast" +MMAL_COMPONENT_DEFAULT_CLOCK = b"vc.clock" +MMAL_COMPONENT_DEFAULT_CAMERA_INFO = b"vc.camera_info" +# The following two components aren't in the MMAL headers, but do exist +MMAL_COMPONENT_DEFAULT_NULL_SINK = b"vc.null_sink" +MMAL_COMPONENT_DEFAULT_RESIZER = b"vc.ril.resize" +MMAL_COMPONENT_DEFAULT_ISP = b"vc.ril.isp" +MMAL_COMPONENT_RAW_CAMERA = b"vc.ril.rawcam" + +# util/mmal_util_params.h #################################################### + +mmal_port_parameter_set_boolean = _lib.mmal_port_parameter_set_boolean +mmal_port_parameter_set_boolean.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, MMAL_BOOL_T] +mmal_port_parameter_set_boolean.restype = MMAL_STATUS_T + +mmal_port_parameter_get_boolean = _lib.mmal_port_parameter_get_boolean +mmal_port_parameter_get_boolean.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.POINTER(MMAL_BOOL_T)] +mmal_port_parameter_get_boolean.restype = MMAL_STATUS_T + +mmal_port_parameter_set_uint64 = _lib.mmal_port_parameter_set_uint64 +mmal_port_parameter_set_uint64.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.c_uint64] +mmal_port_parameter_set_uint64.restype = MMAL_STATUS_T + +mmal_port_parameter_get_uint64 = _lib.mmal_port_parameter_get_uint64 +mmal_port_parameter_get_uint64.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.POINTER(ct.c_uint64)] +mmal_port_parameter_get_uint64.restype = MMAL_STATUS_T + +mmal_port_parameter_set_int64 = _lib.mmal_port_parameter_set_int64 +mmal_port_parameter_set_int64.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.c_int64] +mmal_port_parameter_set_int64.restype = MMAL_STATUS_T + +mmal_port_parameter_get_int64 = _lib.mmal_port_parameter_get_int64 +mmal_port_parameter_get_int64.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.POINTER(ct.c_int64)] +mmal_port_parameter_get_int64.restype = MMAL_STATUS_T + +mmal_port_parameter_set_uint32 = _lib.mmal_port_parameter_set_uint32 +mmal_port_parameter_set_uint32.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.c_uint32] +mmal_port_parameter_set_uint32.restype = MMAL_STATUS_T + +mmal_port_parameter_get_uint32 = _lib.mmal_port_parameter_get_uint32 +mmal_port_parameter_get_uint32.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.POINTER(ct.c_uint32)] +mmal_port_parameter_get_uint32.restype = MMAL_STATUS_T + +mmal_port_parameter_set_int32 = _lib.mmal_port_parameter_set_int32 +mmal_port_parameter_set_int32.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.c_int32] +mmal_port_parameter_set_int32.restype = MMAL_STATUS_T + +mmal_port_parameter_get_int32 = _lib.mmal_port_parameter_get_int32 +mmal_port_parameter_get_int32.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.POINTER(ct.c_int32)] +mmal_port_parameter_get_int32.restype = MMAL_STATUS_T + 
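+# NOTE Illustrative sketch only (not part of the original MMAL headers): the
+# typed helpers above wrap mmal_port_parameter_set/get for simple values, for
+# example setting a 32-bit camera parameter. "control_port" is a placeholder
+# for a ct.POINTER(MMAL_PORT_T) obtained from a component; the value shown is
+# in microseconds for this particular parameter.
+#
+#   status = mmal_port_parameter_set_uint32(
+#       control_port, MMAL_PARAMETER_SHUTTER_SPEED, 20000)
+#   assert status == MMAL_SUCCESS
+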
+mmal_port_parameter_set_rational = _lib.mmal_port_parameter_set_rational +mmal_port_parameter_set_rational.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, MMAL_RATIONAL_T] +mmal_port_parameter_set_rational.restype = MMAL_STATUS_T + +mmal_port_parameter_get_rational = _lib.mmal_port_parameter_get_rational +mmal_port_parameter_get_rational.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.POINTER(MMAL_RATIONAL_T)] +mmal_port_parameter_get_rational.restype = MMAL_STATUS_T + +mmal_port_parameter_set_string = _lib.mmal_port_parameter_set_string +mmal_port_parameter_set_string.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.c_char_p] +mmal_port_parameter_set_string.restype = MMAL_STATUS_T + +mmal_port_parameter_set_bytes = _lib.mmal_port_parameter_set_bytes +mmal_port_parameter_set_bytes.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.POINTER(ct.c_uint8), ct.c_uint] +mmal_port_parameter_set_bytes.restype = MMAL_STATUS_T + +mmal_util_port_set_uri = _lib.mmal_util_port_set_uri +mmal_util_port_set_uri.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_char_p] +mmal_util_port_set_uri.restype = MMAL_STATUS_T + +mmal_util_set_display_region = _lib.mmal_util_set_display_region +mmal_util_set_display_region.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_DISPLAYREGION_T)] +mmal_util_set_display_region.restype = MMAL_STATUS_T + +mmal_util_camera_use_stc_timestamp = _lib.mmal_util_camera_use_stc_timestamp +mmal_util_camera_use_stc_timestamp.argtypes = [ct.POINTER(MMAL_PORT_T), MMAL_CAMERA_STC_MODE_T] +mmal_util_camera_use_stc_timestamp.restype = MMAL_STATUS_T + +mmal_util_get_core_port_stats = _lib.mmal_util_get_core_port_stats +mmal_util_get_core_port_stats.argtypes = [ct.POINTER(MMAL_PORT_T), MMAL_CORE_STATS_DIR, MMAL_BOOL_T, ct.POINTER(MMAL_CORE_STATISTICS_T)] +mmal_util_get_core_port_stats.restype = MMAL_STATUS_T + +# util/mmal_connection.h ##################################################### + +MMAL_CONNECTION_FLAG_TUNNELLING = 0x1 +MMAL_CONNECTION_FLAG_ALLOCATION_ON_INPUT = 0x2 +MMAL_CONNECTION_FLAG_ALLOCATION_ON_OUTPUT = 0x4 +MMAL_CONNECTION_FLAG_KEEP_BUFFER_REQUIREMENTS = 0x8 +MMAL_CONNECTION_FLAG_DIRECT = 0x10 + +class MMAL_CONNECTION_T(ct.Structure): + # Forward type declaration + pass + +MMAL_CONNECTION_CALLBACK_T = ct.CFUNCTYPE( + None, + ct.POINTER(MMAL_CONNECTION_T)) + +MMAL_CONNECTION_T._fields_ = [ + ('user_data', ct.c_void_p), + ('callback', MMAL_CONNECTION_CALLBACK_T), + ('is_enabled', ct.c_uint32), + ('flags', ct.c_uint32), + # Originally "in", but this is a Python keyword + ('in_', ct.POINTER(MMAL_PORT_T)), + ('out', ct.POINTER(MMAL_PORT_T)), + ('pool', ct.POINTER(MMAL_POOL_T)), + ('queue', ct.POINTER(MMAL_QUEUE_T)), + ('name', ct.c_char_p), + ('time_setup', ct.c_int64), + ('time_enable', ct.c_int64), + ('time_disable', ct.c_int64), + ] + +mmal_connection_create = _lib.mmal_connection_create +mmal_connection_create.argtypes = [ct.POINTER(ct.POINTER(MMAL_CONNECTION_T)), ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_PORT_T), ct.c_uint32] +mmal_connection_create.restype = MMAL_STATUS_T + +mmal_connection_acquire = _lib.mmal_connection_acquire +mmal_connection_acquire.argtypes = [ct.POINTER(MMAL_CONNECTION_T)] +mmal_connection_acquire.restype = None + +mmal_connection_release = _lib.mmal_connection_release +mmal_connection_release.argtypes = [ct.POINTER(MMAL_CONNECTION_T)] +mmal_connection_release.restype = MMAL_STATUS_T + +mmal_connection_destroy = _lib.mmal_connection_destroy +mmal_connection_destroy.argtypes = [ct.POINTER(MMAL_CONNECTION_T)] +mmal_connection_destroy.restype = 
MMAL_STATUS_T + +mmal_connection_enable = _lib.mmal_connection_enable +mmal_connection_enable.argtypes = [ct.POINTER(MMAL_CONNECTION_T)] +mmal_connection_enable.restype = MMAL_STATUS_T + +mmal_connection_disable = _lib.mmal_connection_disable +mmal_connection_disable.argtypes = [ct.POINTER(MMAL_CONNECTION_T)] +mmal_connection_disable.restype = MMAL_STATUS_T + +mmal_connection_event_format_changed = _lib.mmal_connection_event_format_changed +mmal_connection_event_format_changed.argtypes = [ct.POINTER(MMAL_CONNECTION_T), ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_connection_event_format_changed.restype = MMAL_STATUS_T + +# util/mmal_util.h ########################################################### + +mmal_status_to_string = _lib.mmal_status_to_string +mmal_status_to_string.argtypes = [MMAL_STATUS_T] +mmal_status_to_string.restype = ct.c_char_p + +mmal_encoding_stride_to_width = _lib.mmal_encoding_stride_to_width +mmal_encoding_stride_to_width.argtypes = [ct.c_uint32, ct.c_uint32] +mmal_encoding_stride_to_width.restype = ct.c_uint32 + +mmal_encoding_width_to_stride = _lib.mmal_encoding_width_to_stride +mmal_encoding_width_to_stride.argtypes = [ct.c_uint32, ct.c_uint32] +mmal_encoding_width_to_stride.restype = ct.c_uint32 + +mmal_port_type_to_string = _lib.mmal_port_type_to_string +mmal_port_type_to_string.argtypes = [MMAL_PORT_TYPE_T] +mmal_port_type_to_string.restype = ct.c_char_p + +mmal_port_parameter_alloc_get = _lib.mmal_port_parameter_alloc_get +mmal_port_parameter_alloc_get.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint32, ct.c_uint32, ct.POINTER(MMAL_STATUS_T)] +mmal_port_parameter_alloc_get.restype = ct.POINTER(MMAL_PARAMETER_HEADER_T) + +mmal_port_parameter_free = _lib.mmal_port_parameter_free +mmal_port_parameter_free.argtypes = [ct.POINTER(MMAL_PARAMETER_HEADER_T)] +mmal_port_parameter_free.restype = None + +mmal_buffer_header_copy_header = _lib.mmal_buffer_header_copy_header +mmal_buffer_header_copy_header.argtypes = [ct.POINTER(MMAL_BUFFER_HEADER_T), ct.POINTER(MMAL_BUFFER_HEADER_T)] +mmal_buffer_header_copy_header.restype = None + +mmal_port_pool_create = _lib.mmal_port_pool_create +mmal_port_pool_create.argtypes = [ct.POINTER(MMAL_PORT_T), ct.c_uint, ct.c_uint32] +mmal_port_pool_create.restype = ct.POINTER(MMAL_POOL_T) + +mmal_port_pool_destroy = _lib.mmal_port_pool_destroy +mmal_port_pool_destroy.argtypes = [ct.POINTER(MMAL_PORT_T), ct.POINTER(MMAL_POOL_T)] +mmal_port_pool_destroy.restype = None + +mmal_log_dump_port = _lib.mmal_log_dump_port +mmal_log_dump_port.argtypes = [ct.POINTER(MMAL_PORT_T)] +mmal_log_dump_port.restype = None + +mmal_log_dump_format = _lib.mmal_log_dump_format +mmal_log_dump_format.argtypes = [ct.POINTER(MMAL_ES_FORMAT_T)] +mmal_log_dump_format.restype = None + +mmal_util_get_port = _lib.mmal_util_get_port +mmal_util_get_port.argtypes = [ct.POINTER(MMAL_COMPONENT_T), MMAL_PORT_TYPE_T, ct.c_uint] +mmal_util_get_port.restype = ct.POINTER(MMAL_PORT_T) + +mmal_4cc_to_string = _lib.mmal_4cc_to_string +mmal_4cc_to_string.argtypes = [ct.c_char_p, ct.c_size_t, ct.c_uint32] +mmal_4cc_to_string.restype = ct.c_char_p + diff --git a/picamera/mmalobj.py b/picamera/mmalobj.py new file mode 100644 index 0000000..ec167f2 --- /dev/null +++ b/picamera/mmalobj.py @@ -0,0 +1,3736 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python header conversion +# Copyright (c) 2013-2017 Dave Jones +# +# Original headers +# Copyright (c) 2012, Broadcom Europe Ltd +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + +import io +import ctypes as ct +import warnings +import weakref +from threading import Thread, Event +from collections import namedtuple +from fractions import Fraction +from itertools import cycle +from functools import reduce +from operator import mul + +from . import bcm_host, mmal +from .streams import BufferIO +from .exc import ( + mmal_check, + PiCameraValueError, + PiCameraRuntimeError, + PiCameraMMALError, + PiCameraPortDisabled, + PiCameraDeprecated, + ) + + +# Old firmwares confuse the RGB24 and BGR24 encodings. This flag tracks whether +# the order needs fixing (it is set during MMALCamera.__init__). +FIX_RGB_BGR_ORDER = None + +# Mapping of parameters to the C-structure they expect / return. If a parameter +# does not appear in this mapping, it cannot be queried / set with the +# MMALControlPort.params attribute. 
+PARAM_TYPES = { + mmal.MMAL_PARAMETER_ALGORITHM_CONTROL: mmal.MMAL_PARAMETER_ALGORITHM_CONTROL_T, + mmal.MMAL_PARAMETER_ANNOTATE: None, # adjusted by MMALCamera.annotate_rev + mmal.MMAL_PARAMETER_ANTISHAKE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET: mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET_T, + mmal.MMAL_PARAMETER_AWB_MODE: mmal.MMAL_PARAMETER_AWBMODE_T, + mmal.MMAL_PARAMETER_BRIGHTNESS: mmal.MMAL_PARAMETER_RATIONAL_T, + mmal.MMAL_PARAMETER_BUFFER_FLAG_FILTER: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS: mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS_T, + mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE: mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE_T, + mmal.MMAL_PARAMETER_CAMERA_CONFIG: mmal.MMAL_PARAMETER_CAMERA_CONFIG_T, + mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_CAMERA_INFO: None, # adjusted by MMALCameraInfo.info_rev + mmal.MMAL_PARAMETER_CAMERA_INTERFACE: mmal.MMAL_PARAMETER_CAMERA_INTERFACE_T, + mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_CAMERA_MIN_ISO: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_CAMERA_NUM: mmal.MMAL_PARAMETER_INT32_T, + mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG: mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG_T, + mmal.MMAL_PARAMETER_CAMERA_RX_TIMING: mmal.MMAL_PARAMETER_CAMERA_RX_TIMING_T, + mmal.MMAL_PARAMETER_CAMERA_SETTINGS: mmal.MMAL_PARAMETER_CAMERA_SETTINGS_T, + mmal.MMAL_PARAMETER_CAMERA_USE_CASE: mmal.MMAL_PARAMETER_CAMERA_USE_CASE_T, + mmal.MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T, + mmal.MMAL_PARAMETER_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_CAPTURE_MODE: mmal.MMAL_PARAMETER_CAPTUREMODE_T, + mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_CAPTURE_STATUS: mmal.MMAL_PARAMETER_CAPTURE_STATUS_T, + mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST: mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T, + mmal.MMAL_PARAMETER_CLOCK_ACTIVE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD_T, + mmal.MMAL_PARAMETER_CLOCK_ENABLE_BUFFER_INFO: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_CLOCK_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, + mmal.MMAL_PARAMETER_CLOCK_LATENCY: mmal.MMAL_PARAMETER_CLOCK_LATENCY_T, + mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD_T, + mmal.MMAL_PARAMETER_CLOCK_SCALE: mmal.MMAL_PARAMETER_RATIONAL_T, + mmal.MMAL_PARAMETER_CLOCK_TIME: mmal.MMAL_PARAMETER_INT64_T, + mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD_T, + mmal.MMAL_PARAMETER_COLOUR_EFFECT: mmal.MMAL_PARAMETER_COLOURFX_T, + mmal.MMAL_PARAMETER_CONTRAST: mmal.MMAL_PARAMETER_RATIONAL_T, + mmal.MMAL_PARAMETER_CORE_STATISTICS: mmal.MMAL_PARAMETER_CORE_STATISTICS_T, + mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS: mmal.MMAL_PARAMETER_AWB_GAINS_T, + mmal.MMAL_PARAMETER_DISPLAYREGION: mmal.MMAL_DISPLAYREGION_T, + mmal.MMAL_PARAMETER_DPF_CONFIG: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION: mmal.MMAL_PARAMETER_DRC_T, + mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_EXIF_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_EXIF: mmal.MMAL_PARAMETER_EXIF_T, + mmal.MMAL_PARAMETER_EXP_METERING_MODE: mmal.MMAL_PARAMETER_EXPOSUREMETERINGMODE_T, + 
mmal.MMAL_PARAMETER_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T, + mmal.MMAL_PARAMETER_EXPOSURE_MODE: mmal.MMAL_PARAMETER_EXPOSUREMODE_T, + mmal.MMAL_PARAMETER_EXTRA_BUFFERS: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_FIELD_OF_VIEW: mmal.MMAL_PARAMETER_FIELD_OF_VIEW_T, + mmal.MMAL_PARAMETER_FLASH: mmal.MMAL_PARAMETER_FLASH_T, + mmal.MMAL_PARAMETER_FLASH_REQUIRED: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_FLASH_SELECT: mmal.MMAL_PARAMETER_FLASH_SELECT_T, + mmal.MMAL_PARAMETER_FLICKER_AVOID: mmal.MMAL_PARAMETER_FLICKERAVOID_T, + mmal.MMAL_PARAMETER_FOCUS: mmal.MMAL_PARAMETER_FOCUS_T, + mmal.MMAL_PARAMETER_FOCUS_REGIONS: mmal.MMAL_PARAMETER_FOCUS_REGIONS_T, + mmal.MMAL_PARAMETER_FOCUS_STATUS: mmal.MMAL_PARAMETER_FOCUS_STATUS_T, + mmal.MMAL_PARAMETER_FPS_RANGE: mmal.MMAL_PARAMETER_FPS_RANGE_T, + mmal.MMAL_PARAMETER_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway... + mmal.MMAL_PARAMETER_IMAGE_EFFECT: mmal.MMAL_PARAMETER_IMAGEFX_T, + mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS: mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T, + mmal.MMAL_PARAMETER_INPUT_CROP: mmal.MMAL_PARAMETER_INPUT_CROP_T, + mmal.MMAL_PARAMETER_INTRAPERIOD: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_ISO: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_JPEG_ATTACH_LOG: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_JPEG_Q_FACTOR: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_JPEG_RESTART_INTERVAL: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_LOCKSTEP_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_LOGGING: mmal.MMAL_PARAMETER_LOGGING_T, + mmal.MMAL_PARAMETER_MB_ROWS_PER_SLICE: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_MEM_USAGE: mmal.MMAL_PARAMETER_MEM_USAGE_T, + mmal.MMAL_PARAMETER_MINIMISE_FRAGMENTATION: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_MIRROR: mmal.MMAL_PARAMETER_UINT32_T, # actually mmal.MMAL_PARAMETER_MIRROR_T but this just contains a uint32 + mmal.MMAL_PARAMETER_NALUNITFORMAT: mmal.MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T, + mmal.MMAL_PARAMETER_NO_IMAGE_PADDING: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_POWERMON_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_PRIVACY_INDICATOR: mmal.MMAL_PARAMETER_PRIVACY_INDICATOR_T, + mmal.MMAL_PARAMETER_PROFILE: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T, + mmal.MMAL_PARAMETER_RATECONTROL: mmal.MMAL_PARAMETER_VIDEO_RATECONTROL_T, + mmal.MMAL_PARAMETER_REDEYE: mmal.MMAL_PARAMETER_REDEYE_T, + mmal.MMAL_PARAMETER_ROTATION: mmal.MMAL_PARAMETER_INT32_T, + mmal.MMAL_PARAMETER_SATURATION: mmal.MMAL_PARAMETER_RATIONAL_T, + mmal.MMAL_PARAMETER_SEEK: mmal.MMAL_PARAMETER_SEEK_T, + mmal.MMAL_PARAMETER_SENSOR_INFORMATION: mmal.MMAL_PARAMETER_SENSOR_INFORMATION_T, + mmal.MMAL_PARAMETER_SHARPNESS: mmal.MMAL_PARAMETER_RATIONAL_T, + mmal.MMAL_PARAMETER_SHUTTER_SPEED: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_STATISTICS: mmal.MMAL_PARAMETER_STATISTICS_T, + mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE: mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T, + mmal.MMAL_PARAMETER_STILLS_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS: mmal.MMAL_PARAMETER_ENCODING_T, + mmal.MMAL_PARAMETER_SUPPORTED_PROFILES: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T, + mmal.MMAL_PARAMETER_SW_SATURATION_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_SW_SHARPEN_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_SYSTEM_TIME: mmal.MMAL_PARAMETER_UINT64_T, + mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION: 
mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T, + mmal.MMAL_PARAMETER_URI: mmal.MMAL_PARAMETER_URI_T, + mmal.MMAL_PARAMETER_USE_STC: mmal.MMAL_PARAMETER_CAMERA_STC_MODE_T, + mmal.MMAL_PARAMETER_VIDEO_ALIGN_HORIZ: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ALIGN_VERT: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_BIT_RATE: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE: mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T, + mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE: mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_QP_P: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL: mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway... + mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE: mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T, + mmal.MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH: mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T, + mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION: mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T, + mmal.MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS: mmal.MMAL_PARAMETER_UINT32_T, + mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS: mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS_T, + mmal.MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_VIDEO_STABILISATION: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_ZERO_COPY: mmal.MMAL_PARAMETER_BOOLEAN_T, + mmal.MMAL_PARAMETER_ZERO_SHUTTER_LAG: mmal.MMAL_PARAMETER_ZEROSHUTTERLAG_T, + mmal.MMAL_PARAMETER_ZOOM: mmal.MMAL_PARAMETER_SCALEFACTOR_T, + } + + +class PiCameraFraction(Fraction): + """ + Extends :class:`~fractions.Fraction` to act as a (numerator, denominator) + tuple when required. 
+    """
+    def __len__(self):
+        warnings.warn(
+            PiCameraDeprecated(
+                'Accessing framerate as a tuple is deprecated; this value is '
+                'now a Fraction, so you can query the numerator and '
+                'denominator properties directly, convert to an int or float, '
+                'or perform arithmetic operations and comparisons directly'))
+        return 2
+
+    def __getitem__(self, index):
+        warnings.warn(
+            PiCameraDeprecated(
+                'Accessing framerate as a tuple is deprecated; this value is '
+                'now a Fraction, so you can query the numerator and '
+                'denominator properties directly, convert to an int or float, '
+                'or perform arithmetic operations and comparisons directly'))
+        if index == 0:
+            return self.numerator
+        elif index == 1:
+            return self.denominator
+        else:
+            raise IndexError('invalid index %d' % index)
+
+    def __contains__(self, value):
+        return value in (self.numerator, self.denominator)
+
+
+class PiResolution(namedtuple('PiResolution', ('width', 'height'))):
+    """
+    A :func:`~collections.namedtuple` derivative which represents a resolution
+    with a :attr:`width` and :attr:`height`.
+
+    .. attribute:: width
+
+        The width of the resolution in pixels
+
+    .. attribute:: height
+
+        The height of the resolution in pixels
+
+    .. versionadded:: 1.11
+    """
+
+    __slots__ = () # workaround python issue #24931
+
+    def pad(self, width=32, height=16):
+        """
+        Returns the resolution padded up to the nearest multiple of *width*
+        and *height* which default to 32 and 16 respectively (the camera's
+        native block size for most operations). For example:
+
+        .. code-block:: pycon
+
+            >>> PiResolution(1920, 1080).pad()
+            PiResolution(width=1920, height=1088)
+            >>> PiResolution(100, 100).pad()
+            PiResolution(width=128, height=112)
+            >>> PiResolution(100, 100).pad(16, 16)
+            PiResolution(width=112, height=112)
+        """
+        return PiResolution(
+            width=((self.width + (width - 1)) // width) * width,
+            height=((self.height + (height - 1)) // height) * height,
+            )
+
+    def transpose(self):
+        """
+        Returns the resolution with the width and height transposed. For
+        example:
+
+        .. code-block:: pycon
+
+            >>> PiResolution(1920, 1080).transpose()
+            PiResolution(width=1080, height=1920)
+        """
+        return PiResolution(self.height, self.width)
+
+    def __str__(self):
+        return '%dx%d' % (self.width, self.height)
+
+
+class PiFramerateRange(namedtuple('PiFramerateRange', ('low', 'high'))):
+    """
+    This class is a :func:`~collections.namedtuple` derivative used to store
+    the low and high limits of a range of framerates. It is recommended that
+    you access the information stored by this class by attribute rather than
+    position (for example: ``camera.framerate_range.low`` rather than
+    ``camera.framerate_range[0]``).
+
+    .. attribute:: low
+
+        The lowest framerate that the camera is permitted to use (inclusive).
+        When the :attr:`~picamera.PiCamera.framerate_range` attribute is
+        queried, this value will always be returned as a
+        :class:`~fractions.Fraction`.
+
+    .. attribute:: high
+
+        The highest framerate that the camera is permitted to use (inclusive).
+        When the :attr:`~picamera.PiCamera.framerate_range` attribute is
+        queried, this value will always be returned as a
+        :class:`~fractions.Fraction`.
+
+    .. versionadded:: 1.13
+    """
+
+    __slots__ = () # workaround python issue #24931
+
+    def __new__(cls, low, high):
+        return super(PiFramerateRange, cls).__new__(cls, to_fraction(low),
+                                                    to_fraction(high))
+
+    def __str__(self):
+        return '%s..%s' % (self.low, self.high)
+
+
+class PiSensorMode(namedtuple('PiSensorMode', ('resolution', 'framerates',
+                                               'video', 'still', 'full_fov'))):
+    """
+    This class is a :func:`~collections.namedtuple` derivative used to store
+    the attributes describing a camera sensor mode.
+
+    .. attribute:: resolution
+
+        A :class:`PiResolution` specifying the size of frames output by the
+        camera in this mode.
+
+    .. attribute:: framerates
+
+        A :class:`PiFramerateRange` specifying the minimum and maximum
+        framerates supported by this sensor mode. Typically the low value is
+        exclusive and high value inclusive.
+
+    .. attribute:: video
+
+        A :class:`bool` indicating whether or not the mode is capable of
+        recording video. Currently this is always ``True``.
+
+    .. attribute:: still
+
+        A :class:`bool` indicating whether the mode can be used for still
+        captures (cases where a capture method is called with
+        ``use_video_port`` set to ``False``).
+
+    .. attribute:: full_fov
+
+        A :class:`bool` indicating whether the full width of the sensor
+        area is used to capture frames. This can be ``True`` even when the
+        resolution is less than the camera's maximum resolution due to binning
+        and skipping. See :ref:`camera_modes` for a diagram of the available
+        fields of view.
+    """
+
+    __slots__ = () # workaround python issue #24931
+
+    def __new__(cls, resolution, framerates, video=True, still=False,
+                full_fov=True):
+        return super(PiSensorMode, cls).__new__(
+            cls,
+            resolution
+            if isinstance(resolution, PiResolution) else
+            to_resolution(resolution),
+            framerates
+            if isinstance(framerates, PiFramerateRange) else
+            PiFramerateRange(*framerates),
+            video, still, full_fov)
+
+
+def open_stream(stream, output=True, buffering=65536):
+    """
+    This is the core of picamera's IO-semantics. It returns a tuple of a
+    file-like object and a bool indicating whether the stream requires closing
+    once the caller is finished with it.
+
+    * If *stream* is a string, it is opened as a file object (with mode 'wb' if
+      *output* is ``True``, and the specified amount of *buffering*). In this
+      case the function returns ``(stream, True)``.
+
+    * If *stream* is a stream with a ``write`` method, it is returned as
+      ``(stream, False)``.
+
+    * Otherwise *stream* is assumed to be a writeable buffer and is wrapped
+      with :class:`BufferIO`. The function returns ``(stream, True)``.
+    """
+    if isinstance(stream, bytes):
+        stream = stream.decode('ascii')
+    opened = isinstance(stream, str)
+    if opened:
+        stream = io.open(stream, 'wb' if output else 'rb', buffering)
+    else:
+        try:
+            if output:
+                stream.write
+            else:
+                stream.read
+        except AttributeError:
+            # Assume the stream is actually a buffer
+            opened = True
+            stream = BufferIO(stream)
+            if output and not stream.writable:
+                raise IOError('writeable buffer required for output')
+    return (stream, opened)
+
+
+def close_stream(stream, opened):
+    """
+    If *opened* is ``True``, then the ``close`` method of *stream* will be
+    called. Otherwise, the function will attempt to call the ``flush`` method
+    on *stream* (if one exists). This function essentially takes the output
+    of :func:`open_stream` and finalizes the result.
+    """
+    if opened:
+        stream.close()
+    else:
+        try:
+            stream.flush()
+        except AttributeError:
+            pass
+
+
+def to_resolution(value):
+    """
+    Converts *value* which may be a (width, height) tuple or a string
+    containing a representation of a resolution (e.g. "1024x768" or "1080p") to
+    a (width, height) tuple.
+    """
+    if isinstance(value, bytes):
+        value = value.decode('utf-8')
+    if isinstance(value, str):
+        try:
+            # A selection from https://en.wikipedia.org/wiki/Graphics_display_resolution
+            # Feel free to suggest additions
+            w, h = {
+                'VGA': (640, 480),
+                'SVGA': (800, 600),
+                'XGA': (1024, 768),
+                'SXGA': (1280, 1024),
+                'UXGA': (1600, 1200),
+                'HD': (1280, 720),
+                'FHD': (1920, 1080),
+                '1080P': (1920, 1080),
+                '720P': (1280, 720),
+                }[value.strip().upper()]
+        except KeyError:
+            w, h = (int(i.strip()) for i in value.upper().split('X', 1))
+    else:
+        try:
+            w, h = value
+        except (TypeError, ValueError):
+            raise PiCameraValueError("Invalid resolution tuple: %r" % value)
+    return PiResolution(w, h)
+
+
+def to_fraction(value, den_limit=65536):
+    """
+    Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a
+    (numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting
+    the denominator to the range 0 < n <= *den_limit* (which defaults to
+    65536).
+    """
+    try:
+        # int, long, or fraction
+        n, d = value.numerator, value.denominator
+    except AttributeError:
+        try:
+            # float
+            n, d = value.as_integer_ratio()
+        except AttributeError:
+            try:
+                n, d = value.num, value.den
+            except AttributeError:
+                try:
+                    # tuple
+                    n, d = value
+                    warnings.warn(
+                        PiCameraDeprecated(
+                            "Setting framerate or gains as a tuple is "
+                            "deprecated; please use one of Python's many "
+                            "numeric classes like int, float, Decimal, or "
+                            "Fraction instead"))
+                except (TypeError, ValueError):
+                    # try and convert anything else to a Fraction directly
+                    value = Fraction(value)
+                    n, d = value.numerator, value.denominator
+    # Ensure denominator is reasonable
+    if d == 0:
+        raise PiCameraValueError("Denominator cannot be 0")
+    elif d > den_limit:
+        return Fraction(n, d).limit_denominator(den_limit)
+    else:
+        return Fraction(n, d)
+
+
+def to_rational(value):
+    """
+    Converts *value* (which can be anything accepted by :func:`to_fraction`) to
+    an MMAL_RATIONAL_T structure.
+    """
+    value = to_fraction(value)
+    return mmal.MMAL_RATIONAL_T(value.numerator, value.denominator)
+
+
+def buffer_bytes(buf):
+    """
+    Given an object which implements the buffer protocol, this function
+    returns the size of the object in bytes. The object can be
+    multi-dimensional or include items larger than byte-size.
+    """
+    if not isinstance(buf, memoryview):
+        buf = memoryview(buf)
+    return buf.itemsize * reduce(mul, buf.shape)
+
+
+def debug_pipeline(port):
+    """
+    Given an :class:`MMALVideoPort` *port*, this traces all objects in the
+    pipeline feeding it (including components and connections) and yields each
+    object in turn. 
Hence the generator typically yields something like: + + * :class:`MMALVideoPort` (the specified output port) + * :class:`MMALEncoder` (the encoder which owns the output port) + * :class:`MMALVideoPort` (the encoder's input port) + * :class:`MMALConnection` (the connection between the splitter and encoder) + * :class:`MMALVideoPort` (the splitter's output port) + * :class:`MMALSplitter` (the splitter on the camera's video port) + * :class:`MMALVideoPort` (the splitter's input port) + * :class:`MMALConnection` (the connection between the splitter and camera) + * :class:`MMALVideoPort` (the camera's video port) + * :class:`MMALCamera` (the camera component) + """ + + def find_port(addr): + for obj in MMALObject.REGISTRY: + if isinstance(obj, MMALControlPort): + if ct.addressof(obj._port[0]) == addr: + return obj + raise IndexError('unable to locate port with address %x' % addr) + + def find_component(addr): + for obj in MMALObject.REGISTRY: + if isinstance(obj, MMALBaseComponent) and obj._component is not None: + if ct.addressof(obj._component[0]) == addr: + return obj + raise IndexError('unable to locate component with address %x' % addr) + + assert isinstance(port, (MMALControlPort, MMALPythonPort)) + while True: + if port.type == mmal.MMAL_PORT_TYPE_OUTPUT: + yield port + if isinstance(port, MMALPythonPort): + comp = port._owner() + else: + comp = find_component(ct.addressof(port._port[0].component[0])) + yield comp + if not isinstance(comp, (MMALComponent, MMALPythonComponent)): + break + if comp.connection is None: + break + if isinstance(comp.connection, MMALPythonConnection): + port = comp.connection._target + else: + port = find_port(ct.addressof(comp.connection._connection[0].in_[0])) + yield port + yield comp.connection + if isinstance(comp.connection, MMALPythonConnection): + port = comp.connection._source + else: + port = find_port(ct.addressof(comp.connection._connection[0].out[0])) + + +def print_pipeline(port): + """ + Prints a human readable representation of the pipeline feeding the + specified :class:`MMALVideoPort` *port*. 
+ """ + rows = [[], [], [], [], [], []] + under_comp = False + for obj in reversed(list(debug_pipeline(port))): + if isinstance(obj, (MMALBaseComponent, MMALPythonBaseComponent)): + rows[0].append(obj.name) + under_comp = True + elif isinstance(obj, MMALVideoPort): + rows[0].append('[%d]' % obj._port[0].index) + if under_comp: + rows[1].append('encoding') + if obj.format == mmal.MMAL_ENCODING_OPAQUE: + rows[1].append(obj.opaque_subformat) + else: + rows[1].append(mmal.FOURCC_str(obj._port[0].format[0].encoding)) + if under_comp: + rows[2].append('buf') + rows[2].append('%dx%d' % (obj._port[0].buffer_num, obj._port[0].buffer_size)) + if under_comp: + rows[3].append('bitrate') + rows[3].append('%dbps' % (obj._port[0].format[0].bitrate,)) + if under_comp: + rows[4].append('frame') + rows[4].append('%dx%d@%sfps' % ( + obj._port[0].format[0].es[0].video.width, + obj._port[0].format[0].es[0].video.height, + obj.framerate)) + if under_comp: + rows[5].append('colorspc') + under_comp = False + rows[5].append(mmal.FOURCC_str(obj._port[0].format[0].es[0].video.color_space)) + elif isinstance(obj, MMALPythonPort): + rows[0].append('[%d]' % obj._index) + if under_comp: + rows[1].append('encoding') + if obj.format == mmal.MMAL_ENCODING_OPAQUE: + rows[1].append(obj.opaque_subformat) + else: + rows[1].append(mmal.FOURCC_str(obj._format[0].encoding)) + if under_comp: + rows[2].append('buf') + rows[2].append('%dx%d' % (obj.buffer_count, obj.buffer_size)) + if under_comp: + rows[3].append('bitrate') + rows[3].append('%dbps' % (obj._format[0].bitrate,)) + if under_comp: + rows[4].append('frame') + under_comp = False + rows[4].append('%dx%d@%sfps' % ( + obj._format[0].es[0].video.width, + obj._format[0].es[0].video.height, + obj.framerate)) + if under_comp: + rows[5].append('colorspc') + rows[5].append('???') + elif isinstance(obj, (MMALConnection, MMALPythonConnection)): + rows[0].append('') + rows[1].append('') + rows[2].append('-->') + rows[3].append('') + rows[4].append('') + rows[5].append('') + if under_comp: + rows[1].append('encoding') + rows[2].append('buf') + rows[3].append('bitrate') + rows[4].append('frame') + rows[5].append('colorspc') + cols = list(zip(*rows)) + max_lens = [max(len(s) for s in col) + 2 for col in cols] + rows = [ + ''.join('{0:{align}{width}s}'.format(s, align=align, width=max_len) + for s, max_len, align in zip(row, max_lens, cycle('^<^>'))) + for row in rows + ] + for row in rows: + print(row) + + +class MMALObject(object): + """ + Represents an object wrapper around an MMAL object (component, port, + connection, etc). This base class maintains a registry of all MMAL objects + currently alive (via weakrefs) which permits object lookup by name and + listing all used MMAL objects. + """ + + __slots__ = ('__weakref__',) + REGISTRY = weakref.WeakSet() + + def __init__(self): + super(MMALObject, self).__init__() + MMALObject.REGISTRY.add(self) + + +class MMALBaseComponent(MMALObject): + """ + Represents a generic MMAL component. Class attributes are read to determine + the component type, and the OPAQUE sub-formats of each connectable port. 
+ """ + + __slots__ = ('_component', '_control', '_inputs', '_outputs') + component_type = b'none' + opaque_input_subformats = () + opaque_output_subformats = () + + def __init__(self): + super(MMALBaseComponent, self).__init__() + self._component = ct.POINTER(mmal.MMAL_COMPONENT_T)() + mmal_check( + mmal.mmal_component_create(self.component_type, self._component), + prefix="Failed to create MMAL component %s" % self.component_type) + if self._component[0].input_num != len(self.opaque_input_subformats): + raise PiCameraRuntimeError( + 'Expected %d inputs but found %d on component %s' % ( + len(self.opaque_input_subformats), + self._component[0].input_num, + self.component_type)) + if self._component[0].output_num != len(self.opaque_output_subformats): + raise PiCameraRuntimeError( + 'Expected %d outputs but found %d on component %s' % ( + len(self.opaque_output_subformats), + self._component[0].output_num, + self.component_type)) + self._control = MMALControlPort(self._component[0].control) + port_class = { + mmal.MMAL_ES_TYPE_UNKNOWN: MMALPort, + mmal.MMAL_ES_TYPE_CONTROL: MMALControlPort, + mmal.MMAL_ES_TYPE_VIDEO: MMALVideoPort, + mmal.MMAL_ES_TYPE_AUDIO: MMALAudioPort, + mmal.MMAL_ES_TYPE_SUBPICTURE: MMALSubPicturePort, + } + self._inputs = tuple( + port_class[self._component[0].input[n][0].format[0].type]( + self._component[0].input[n], opaque_subformat) + for n, opaque_subformat in enumerate(self.opaque_input_subformats)) + self._outputs = tuple( + port_class[self._component[0].output[n][0].format[0].type]( + self._component[0].output[n], opaque_subformat) + for n, opaque_subformat in enumerate(self.opaque_output_subformats)) + + def close(self): + """ + Close the component and release all its resources. After this is + called, most methods will raise exceptions if called. + """ + if self._component is not None: + # ensure we free any pools associated with input/output ports + for output in self.outputs: + output.disable() + for input in self.inputs: + input.disable() + mmal.mmal_component_destroy(self._component) + self._component = None + self._inputs = () + self._outputs = () + self._control = None + + @property + def name(self): + return self._component[0].name.decode('ascii') + + @property + def control(self): + """ + The :class:`MMALControlPort` control port of the component which can be + used to configure most aspects of the component's behaviour. + """ + return self._control + + @property + def inputs(self): + """ + A sequence of :class:`MMALPort` objects representing the inputs + of the component. + """ + return self._inputs + + @property + def outputs(self): + """ + A sequence of :class:`MMALPort` objects representing the outputs + of the component. + """ + return self._outputs + + @property + def enabled(self): + """ + Returns ``True`` if the component is currently enabled. Use + :meth:`enable` and :meth:`disable` to control the component's state. + """ + return bool(self._component[0].is_enabled) + + def enable(self): + """ + Enable the component. When a component is enabled it will process data + sent to its input port(s), sending the results to buffers on its output + port(s). Components may be implicitly enabled by connections. + """ + mmal_check( + mmal.mmal_component_enable(self._component), + prefix="Failed to enable component") + + def disable(self): + """ + Disables the component. 
+ """ + mmal_check( + mmal.mmal_component_disable(self._component), + prefix="Failed to disable component") + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + def __repr__(self): + if self._component is not None: + return '<%s "%s": %d inputs %d outputs>' % ( + self.__class__.__name__, self.name, + len(self.inputs), len(self.outputs)) + else: + return '<%s closed>' % self.__class__.__name__ + + +class MMALControlPort(MMALObject): + """ + Represents an MMAL port with properties to configure the port's parameters. + """ + __slots__ = ('_port', '_params', '_wrapper') + + def __init__(self, port): + super(MMALControlPort, self).__init__() + self._port = port + self._params = MMALPortParams(port) + self._wrapper = None + + @property + def index(self): + """ + Returns an integer indicating the port's position within its owning + list (inputs, outputs, etc.) + """ + return self._port[0].index + + @property + def enabled(self): + """ + Returns a :class:`bool` indicating whether the port is currently + enabled. Unlike other classes, this is a read-only property. Use + :meth:`enable` and :meth:`disable` to modify the value. + """ + return bool(self._port[0].is_enabled) + + def enable(self, callback=None): + """ + Enable the port with the specified callback function (this must be + ``None`` for connected ports, and a callable for disconnected ports). + + The callback function must accept two parameters which will be this + :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer` + instance. Any return value will be ignored. + """ + def wrapper(port, buf): + buf = MMALBuffer(buf) + try: + callback(self, buf) + finally: + buf.release() + + if callback: + self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper) + else: + self._wrapper = ct.cast(None, mmal.MMAL_PORT_BH_CB_T) + mmal_check( + mmal.mmal_port_enable(self._port, self._wrapper), + prefix="Unable to enable port %s" % self.name) + + def disable(self): + """ + Disable the port. + """ + # NOTE: The test here only exists to avoid spamming the console; when + # disabling an already disabled port MMAL dumps errors to stderr. If + # this test isn't here closing a camera results in half a dozen lines + # of ignored errors + if self.enabled: + try: + mmal_check( + mmal.mmal_port_disable(self._port), + prefix="Unable to disable port %s" % self.name) + except PiCameraMMALError as e: + # Ignore the error if we're disabling an already disabled port + if not (e.status == mmal.MMAL_EINVAL and not self.enabled): + raise e + self._wrapper = None + + @property + def name(self): + result = self._port[0].name.decode('ascii') + if result.endswith(')'): + try: + # strip (format) from port names as it doesn't really belong + # there (it doesn't identify the port in any way) and makes + # matching some of the correctional cases a pain + return result[:result.rindex('(')] + except ValueError: + return result + else: + return result + + @property + def type(self): + """ + The type of the port. One of: + + * MMAL_PORT_TYPE_OUTPUT + * MMAL_PORT_TYPE_INPUT + * MMAL_PORT_TYPE_CONTROL + * MMAL_PORT_TYPE_CLOCK + """ + return self._port[0].type + + @property + def capabilities(self): + """ + The capabilities of the port. A bitfield of the following: + + * MMAL_PORT_CAPABILITY_PASSTHROUGH + * MMAL_PORT_CAPABILITY_ALLOCATION + * MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE + """ + return self._port[0].capabilities + + @property + def params(self): + """ + The configurable parameters for the port. 
This is presented as a + mutable mapping of parameter numbers to values, implemented by the + :class:`MMALPortParams` class. + """ + return self._params + + def __repr__(self): + if self._port is not None: + return '' % self.name + else: + return '' + + +class MMALPort(MMALControlPort): + """ + Represents an MMAL port with properties to configure and update the port's + format. This is the base class of :class:`MMALVideoPort`, + :class:`MMALAudioPort`, and :class:`MMALSubPicturePort`. + """ + __slots__ = ('_opaque_subformat', '_pool', '_stopped', '_connection') + + # A mapping of corrected definitions of supported_formats for ports with + # particular names. Older firmwares either raised EINVAL, ENOSYS, or just + # reported the wrong things for various ports; these lists are derived from + # querying newer firmwares or in some cases guessing sensible defaults + # (for ports where even the newer firmwares get stuff wrong). + _supported_formats_patch = { + 'vc.ril.camera:out:2': [ + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_NV12, + mmal.MMAL_ENCODING_I422, + mmal.MMAL_ENCODING_YUYV, + mmal.MMAL_ENCODING_YVYU, + mmal.MMAL_ENCODING_VYUY, + mmal.MMAL_ENCODING_UYVY, + mmal.MMAL_ENCODING_BGR24, + mmal.MMAL_ENCODING_BGRA, + mmal.MMAL_ENCODING_RGB16, + mmal.MMAL_ENCODING_YV12, + mmal.MMAL_ENCODING_NV21, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_RGBA, + ], + 'vc.ril.image_encode:in:0': [ + mmal.MMAL_ENCODING_RGB16, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_I422, + mmal.MMAL_ENCODING_NV12, + mmal.MMAL_ENCODING_YUYV, + mmal.MMAL_ENCODING_YVYU, + mmal.MMAL_ENCODING_VYUY, + ], + 'vc.ril.image_encode:out:0': [ + mmal.MMAL_ENCODING_JPEG, + mmal.MMAL_ENCODING_GIF, + mmal.MMAL_ENCODING_PNG, + mmal.MMAL_ENCODING_BMP, + mmal.MMAL_ENCODING_PPM, + mmal.MMAL_ENCODING_TGA, + ], + 'vc.ril.resize:in:0': [ + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + mmal.MMAL_ENCODING_RGB16, + mmal.MMAL_ENCODING_I420, + # several invalid encodings (lowercase versions of the priors) + # appear here in modern firmwares but since they don't map to any + # constants they're excluded + mmal.MMAL_ENCODING_I420_SLICE, + ], + 'vc.ril.resize:out:0': [ + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + mmal.MMAL_ENCODING_RGB16, + mmal.MMAL_ENCODING_I420, + # same invalid encodings as above here + mmal.MMAL_ENCODING_I420_SLICE, + ], + 'vc.ril.isp:in:0': [ + mmal.MMAL_ENCODING_BAYER_SBGGR8, + mmal.MMAL_ENCODING_BAYER_SBGGR10DPCM8, + mmal.MMAL_ENCODING_BAYER_SBGGR10P, + mmal.MMAL_ENCODING_BAYER_SBGGR12P, + mmal.MMAL_ENCODING_YUYV, + mmal.MMAL_ENCODING_YVYU, + mmal.MMAL_ENCODING_VYUY, + mmal.MMAL_ENCODING_UYVY, + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_YV12, + mmal.MMAL_ENCODING_I422, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_BGR24, + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + mmal.MMAL_ENCODING_RGB16, + mmal.MMAL_ENCODING_YUVUV128, + mmal.MMAL_ENCODING_NV12, + mmal.MMAL_ENCODING_NV21, + ], + 'vc.ril.isp:out:0': [ + mmal.MMAL_ENCODING_YUYV, + mmal.MMAL_ENCODING_YVYU, + mmal.MMAL_ENCODING_VYUY, + mmal.MMAL_ENCODING_UYVY, + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_YV12, + mmal.MMAL_ENCODING_I422, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_BGR24, + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + mmal.MMAL_ENCODING_RGB16, + mmal.MMAL_ENCODING_YUVUV128, + mmal.MMAL_ENCODING_NV12, + mmal.MMAL_ENCODING_NV21, + ], + 'vc.null_sink:in:0': [ + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_RGB24, + 
mmal.MMAL_ENCODING_BGR24,
+            mmal.MMAL_ENCODING_RGBA,
+            mmal.MMAL_ENCODING_BGRA,
+        ],
+    }
+
+    def __init__(self, port, opaque_subformat='OPQV'):
+        super(MMALPort, self).__init__(port)
+        self.opaque_subformat = opaque_subformat
+        self._pool = None
+        self._stopped = True
+        self._connection = None
+
+    def __repr__(self):
+        if self._port is not None:
+            return '<MMALPort "%s(%s)": buffers=%dx%d>' % (
+                self.name, mmal.FOURCC_str(self.format),
+                self.buffer_count, self.buffer_size)
+        else:
+            return '<MMALPort closed>'
+
+    def _get_opaque_subformat(self):
+        return self._opaque_subformat
+    def _set_opaque_subformat(self, value):
+        self._opaque_subformat = value
+    opaque_subformat = property(
+        _get_opaque_subformat, _set_opaque_subformat, doc="""\
+        Retrieves or sets the opaque sub-format that the port speaks. While
+        most formats (I420, RGBA, etc.) mean one thing, the opaque format is
+        special; different ports produce different sorts of data when
+        configured for OPQV format. This property stores a string which
+        uniquely identifies what the associated port means for OPQV format.
+
+        If the port does not support opaque format at all, set this property to
+        ``None``.
+
+        :class:`MMALConnection` uses this information when negotiating formats
+        for a connection between two ports.
+        """)
+
+    def _get_format(self):
+        result = self._port[0].format[0].encoding
+        if FIX_RGB_BGR_ORDER:
+            return {
+                mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
+                mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
+                }.get(result, result)
+        else:
+            return result
+    def _set_format(self, value):
+        if FIX_RGB_BGR_ORDER:
+            value = {
+                mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
+                mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
+                }.get(value, value)
+        self._port[0].format[0].encoding = value
+        if value == mmal.MMAL_ENCODING_OPAQUE:
+            self._port[0].format[0].encoding_variant = mmal.MMAL_ENCODING_I420
+    format = property(_get_format, _set_format, doc="""\
+        Retrieves or sets the encoding format of the port. Setting this
+        attribute implicitly sets the encoding variant to a sensible value
+        (I420 in the case of OPAQUE).
+
+        After setting this attribute, call :meth:`commit` to make the changes
+        effective.
+        """)
+
+    @property
+    def supported_formats(self):
+        """
+        Retrieves a sequence of supported encodings on this port.
+        """
+        try:
+            mp = self.params[mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS]
+        except PiCameraMMALError as e:
+            if e.status in (mmal.MMAL_EINVAL, mmal.MMAL_ENOSYS):
+                # Workaround: old firmwares raise EINVAL or ENOSYS when various
+                # ports are queried for supported formats. The following is the
+                # correct sequence for old firmwares (note: swapped RGB24 and
+                # BGR24 order in still port) ... 
probably (vc.ril.camera:out:2 + # is definitely right, the rest are largely guessed based on + # queries of later firmwares) + try: + return MMALPort._supported_formats_patch[self.name] + except KeyError: + raise e + else: + raise + else: + result = [ + v for v in mp.encoding if v != 0 + ][:mp.hdr.size // ct.sizeof(ct.c_uint32)] + # Workaround: Fix incorrect result on MMALImageEncoder.outputs[0] + # from modern firmwares + if self.name == 'vc.ril.image_encode:out:0' and result == [ + mmal.MMAL_ENCODING_MP2V, mmal.MMAL_ENCODING_MP2V, + mmal.MMAL_ENCODING_H264, mmal.MMAL_ENCODING_H264, + mmal.MMAL_ENCODING_VP7, mmal.MMAL_ENCODING_VP7, + mmal.MMAL_ENCODING_VP6, mmal.MMAL_ENCODING_VP6]: + return MMALPort._supported_formats_patch[self.name] + else: + return result + + def _get_bitrate(self): + return self._port[0].format[0].bitrate + def _set_bitrate(self, value): + self._port[0].format[0].bitrate = value + bitrate = property(_get_bitrate, _set_bitrate, doc="""\ + Retrieves or sets the bitrate limit for the port's format. + """) + + def copy_from(self, source): + """ + Copies the port's :attr:`format` from the *source* + :class:`MMALControlPort`. + """ + if isinstance(source, MMALPythonPort): + mmal.mmal_format_copy(self._port[0].format, source._format) + else: + mmal.mmal_format_copy(self._port[0].format, source._port[0].format) + + def commit(self): + """ + Commits the port's configuration and automatically updates the number + and size of associated buffers according to the recommendations of the + MMAL library. This is typically called after adjusting the port's + format and/or associated settings (like width and height for video + ports). + """ + mmal_check( + mmal.mmal_port_format_commit(self._port), + prefix="Format couldn't be set on port %s" % self.name) + # Workaround: Unfortunately, there is an upstream issue with the + # buffer_num_recommended which means it can't currently be used (see + # discussion in raspberrypi/userland#167). There's another upstream + # issue with buffer_num_min which means we need to guard against 0 + # values... + self._port[0].buffer_num = max(1, self._port[0].buffer_num_min) + self._port[0].buffer_size = ( + self._port[0].buffer_size_recommended + if self._port[0].buffer_size_recommended > 0 else + self._port[0].buffer_size_min) + + @property + def pool(self): + """ + Returns the :class:`MMALPool` associated with the buffer, if any. + """ + return self._pool + + def get_buffer(self, block=True, timeout=None): + """ + Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block* + and *timeout* act as they do in the corresponding + :meth:`MMALPool.get_buffer`. + """ + if not self.enabled: + raise PiCameraPortDisabled( + 'cannot get buffer from disabled port %s' % self.name) + return self.pool.get_buffer(block, timeout) + + def send_buffer(self, buf): + """ + Send :class:`MMALBuffer` *buf* to the port. 
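+
+        A minimal usage sketch (this assumes ``port`` is an enabled,
+        unconnected input port, so that :meth:`get_buffer` draws from the
+        port's own :attr:`pool`)::
+
+            buf = port.get_buffer()
+            buf.data = b'\x00' * buf.size  # fill the buffer with dummy data
+            port.send_buffer(buf)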
+ """ + if ( + self.type == mmal.MMAL_PORT_TYPE_INPUT and + isinstance(self._connection, MMALPythonConnection) and + self._connection._callback is not None): + try: + modified_buf = self._connection._callback(self._connection, buf) + except: + buf.release() + raise + else: + if modified_buf is None: + buf.release() + return + else: + buf = modified_buf + try: + mmal_check( + mmal.mmal_port_send_buffer(self._port, buf._buf), + prefix="cannot send buffer to port %s" % self.name) + except PiCameraMMALError as e: + # If port is disabled, convert exception for convenience + if e.status == mmal.MMAL_EINVAL and not self.enabled: + raise PiCameraPortDisabled( + 'cannot send buffer to disabled port %s' % self.name) + else: + raise + + def flush(self): + """ + Flush the port. + """ + mmal_check( + mmal.mmal_port_flush(self._port), + prefix="Unable to flush port %s" % self.name) + + def _get_buffer_count(self): + return self._port[0].buffer_num + def _set_buffer_count(self, value): + if value < 1: + raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1') + self._port[0].buffer_num = value + buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\ + The number of buffers allocated (or to be allocated) to the port. + The ``mmalobj`` layer automatically configures this based on + recommendations from the MMAL library. + """) + + def _get_buffer_size(self): + return self._port[0].buffer_size + def _set_buffer_size(self, value): + if value < 0: + raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0') + self._port[0].buffer_size = value + buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\ + The size of buffers allocated (or to be allocated) to the port. The + size of buffers is typically dictated by the port's format. The + ``mmalobj`` layer automatically configures this based on + recommendations from the MMAL library. + """) + + def enable(self, callback=None): + """ + Enable the port with the specified callback function (this must be + ``None`` for connected ports, and a callable for disconnected ports). + + The callback function must accept two parameters which will be this + :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer` + instance. The callback should return ``True`` when processing is + complete and no further calls are expected (e.g. at frame-end for an + image encoder), and ``False`` otherwise. + """ + def wrapper(port, buf): + buf = MMALBuffer(buf) + try: + if not self._stopped and callback(self, buf): + self._stopped = True + finally: + buf.release() + try: + self._pool.send_buffer(block=False) + except PiCameraPortDisabled: + # The port was disabled, no point trying again + pass + + # Workaround: There is a bug in the MJPEG encoder that causes a + # deadlock if the FIFO is full on shutdown. Increasing the encoder + # buffer size makes this less likely to happen. See + # raspberrypi/userland#208. Connecting the encoder component resets the + # output port's buffer size, hence why we correct this here, just + # before enabling the port. 
+ if self._port[0].format[0].encoding == mmal.MMAL_ENCODING_MJPEG: + self._port[0].buffer_size = max(512 * 1024, self._port[0].buffer_size_recommended) + if callback: + assert self._stopped + assert self._pool is None + self._stopped = False + self._pool = MMALPortPool(self) + try: + self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper) + mmal_check( + mmal.mmal_port_enable(self._port, self._wrapper), + prefix="Unable to enable port %s" % self.name) + # If this port is an output port, send it all the buffers + # in the pool. If it's an input port, don't bother: the user + # will presumably want to feed buffers to it manually + if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT: + self._pool.send_all_buffers(block=False) + except: + self._pool.close() + self._pool = None + self._stopped = True + raise + else: + super(MMALPort, self).enable() + + def disable(self): + """ + Disable the port. + """ + self._stopped = True + super(MMALPort, self).disable() + if self._pool is not None: + self._pool.close() + self._pool = None + + @property + def connection(self): + """ + If this port is connected to another, this property holds the + :class:`MMALConnection` or :class:`MMALPythonConnection` object which + represents that connection. If this port is not connected, this + property is ``None``. + """ + return self._connection + + def connect(self, other, **options): + """ + Connect this port to the *other* :class:`MMALPort` (or + :class:`MMALPythonPort`). The type and configuration of the connection + will be automatically selected. + + Various connection *options* can be specified as keyword arguments. + These will be passed onto the :class:`MMALConnection` or + :class:`MMALPythonConnection` constructor that is called (see those + classes for an explanation of the available options). + """ + # Always construct connections from the output end + if self.type != mmal.MMAL_PORT_TYPE_OUTPUT: + return other.connect(self, **options) + if other.type != mmal.MMAL_PORT_TYPE_INPUT: + raise PiCameraValueError( + 'A connection can only be established between an output and ' + 'an input port') + if isinstance(other, MMALPythonPort): + return MMALPythonConnection(self, other, **options) + else: + return MMALConnection(self, other, **options) + + def disconnect(self): + """ + Destroy the connection between this port and another port. + """ + if self.connection is not None: + self.connection.close() + + +class MMALVideoPort(MMALPort): + """ + Represents an MMAL port used to pass video data. + """ + __slots__ = () + + def __repr__(self): + if self._port is not None: + return ( + '' % ( + self.name, mmal.FOURCC_str(self.format), + self._port[0].buffer_num, self._port[0].buffer_size, + self.framesize, self.framerate, + mmal.FOURCC_str(self.colorspace))) + else: + return '' + + def _get_framesize(self): + return PiResolution( + self._port[0].format[0].es[0].video.crop.width, + self._port[0].format[0].es[0].video.crop.height, + ) + def _set_framesize(self, value): + value = to_resolution(value) + video = self._port[0].format[0].es[0].video + video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32) + video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16) + video.crop.width = value.width + video.crop.height = value.height + framesize = property(_get_framesize, _set_framesize, doc="""\ + Retrieves or sets the size of the port's video frames as a (width, + height) tuple. This attribute implicitly handles scaling the given + size up to the block size of the camera (32x16). 
+ + After setting this attribute, call :meth:`~MMALPort.commit` to make the + changes effective. + """) + + def _get_framerate(self): + video = self._port[0].format[0].es[0].video + try: + return Fraction( + video.frame_rate.num, + video.frame_rate.den) + except ZeroDivisionError: + assert video.frame_rate.num == 0 + return Fraction(0, 1) + def _set_framerate(self, value): + value = to_fraction(value) + video = self._port[0].format[0].es[0].video + video.frame_rate.num = value.numerator + video.frame_rate.den = value.denominator + framerate = property(_get_framerate, _set_framerate, doc="""\ + Retrieves or sets the framerate of the port's video frames in fps. + + After setting this attribute, call :meth:`~MMALPort.commit` to make the + changes effective. + """) + + def _get_colorspace(self): + return self._port[0].format[0].es[0].video.color_space + def _set_colorspace(self, value): + self._port[0].format[0].es[0].video.color_space = value + colorspace = property(_get_colorspace, _set_colorspace, doc="""\ + Retrieves or sets the color-space of the port's frames. + + After setting this attribute, call :meth:`~MMALPort.commit` to make the + changes effective. + """) + + +class MMALAudioPort(MMALPort): + """ + Represents an MMAL port used to pass audio data. + """ + __slots__ = () + + def __repr__(self): + if self._port is not None: + return '' % ( + self.name, mmal.FOURCC_str(self.format), + self._port[0].buffer_num, self._port[0].buffer_size) + else: + return '' + + +class MMALSubPicturePort(MMALPort): + """ + Represents an MMAL port used to pass sub-picture (caption) data. + """ + __slots__ = () + + def __repr__(self): + if self._port is not None: + return '' % ( + self.name, mmal.FOURCC_str(self.format), + self._port[0].buffer_num, self._port[0].buffer_size) + else: + return '' + + +class MMALPortParams(object): + """ + Represents the parameters of an MMAL port. This class implements the + :attr:`MMALControlPort.params` attribute. + + Internally, the class understands how to convert certain structures to more + common Python data-types. For example, parameters that expect an + MMAL_RATIONAL_T type will return and accept Python's + :class:`~fractions.Fraction` class (or any other numeric types), while + parameters that expect an MMAL_BOOL_T type will treat anything as a truthy + value. Parameters that expect the MMAL_PARAMETER_STRING_T structure will be + treated as plain strings, and likewise MMAL_PARAMETER_INT32_T and similar + structures will be treated as plain ints. + + Parameters that expect more complex structures will return and expect + those structures verbatim. 
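+
+    A short sketch of the mapping interface (this assumes ``port`` is a
+    control port whose component supports a rational parameter such as
+    ``MMAL_PARAMETER_SATURATION``)::
+
+        from fractions import Fraction
+
+        # rational parameters are read and written as Fraction instances
+        port.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(1, 2)
+        print(port.params[mmal.MMAL_PARAMETER_SATURATION])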
+ """ + __slots__ = ('_port',) + + def __init__(self, port): + super(MMALPortParams, self).__init__() + self._port = port + + def __getitem__(self, key): + dtype = PARAM_TYPES[key] + # Use the short-cut functions where possible (teeny bit faster if we + # get some C to do the structure wrapping for us) + func = { + mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_get_rational, + mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_get_boolean, + mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_get_int32, + mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_get_int64, + mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_get_uint32, + mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_get_uint64, + }.get(dtype, mmal.mmal_port_parameter_get) + conv = { + mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: Fraction(v.num, v.den), + mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: v.value != mmal.MMAL_FALSE, + mmal.MMAL_PARAMETER_INT32_T: lambda v: v.value, + mmal.MMAL_PARAMETER_INT64_T: lambda v: v.value, + mmal.MMAL_PARAMETER_UINT32_T: lambda v: v.value, + mmal.MMAL_PARAMETER_UINT64_T: lambda v: v.value, + mmal.MMAL_PARAMETER_STRING_T: lambda v: v.str.decode('ascii'), + }.get(dtype, lambda v: v) + if func == mmal.mmal_port_parameter_get: + result = dtype( + mmal.MMAL_PARAMETER_HEADER_T(key, ct.sizeof(dtype)) + ) + mmal_check( + func(self._port, result.hdr), + prefix="Failed to get parameter %d" % key) + else: + dtype = { + mmal.MMAL_PARAMETER_RATIONAL_T: mmal.MMAL_RATIONAL_T, + mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.MMAL_BOOL_T, + mmal.MMAL_PARAMETER_INT32_T: ct.c_int32, + mmal.MMAL_PARAMETER_INT64_T: ct.c_int64, + mmal.MMAL_PARAMETER_UINT32_T: ct.c_uint32, + mmal.MMAL_PARAMETER_UINT64_T: ct.c_uint64, + }[dtype] + result = dtype() + mmal_check( + func(self._port, key, result), + prefix="Failed to get parameter %d" % key) + return conv(result) + + def __setitem__(self, key, value): + dtype = PARAM_TYPES[key] + func = { + mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_set_rational, + mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_set_boolean, + mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_set_int32, + mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_set_int64, + mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_set_uint32, + mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_set_uint64, + mmal.MMAL_PARAMETER_STRING_T: mmal.mmal_port_parameter_set_string, + }.get(dtype, mmal.mmal_port_parameter_set) + conv = { + mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: to_rational(v), + mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: mmal.MMAL_TRUE if v else mmal.MMAL_FALSE, + mmal.MMAL_PARAMETER_STRING_T: lambda v: v.encode('ascii'), + }.get(dtype, lambda v: v) + if func == mmal.mmal_port_parameter_set: + mp = conv(value) + assert mp.hdr.id == key + assert mp.hdr.size >= ct.sizeof(dtype) + mmal_check( + func(self._port, mp.hdr), + prefix="Failed to set parameter %d to %r" % (key, value)) + else: + mmal_check( + func(self._port, key, conv(value)), + prefix="Failed to set parameter %d to %r" % (key, value)) + + +class MMALBuffer(object): + """ + Represents an MMAL buffer header. This is usually constructed from the + buffer header pointer and is largely supplied to make working with + the buffer's data a bit simpler. 
Using the buffer as a context manager + implicitly locks the buffer's memory and returns the :mod:`ctypes` + buffer object itself:: + + def callback(port, buf): + with buf as data: + # data is a ctypes uint8 array with size entries + print(len(data)) + + Alternatively you can use the :attr:`data` property directly, which returns + and modifies the buffer's data as a :class:`bytes` object (note this is + generally slower than using the buffer object unless you are simply + replacing the entire buffer):: + + def callback(port, buf): + # the buffer contents as a byte-string + print(buf.data) + """ + __slots__ = ('_buf',) + + def __init__(self, buf): + super(MMALBuffer, self).__init__() + self._buf = buf + + def _get_command(self): + return self._buf[0].cmd + def _set_command(self, value): + self._buf[0].cmd = value + command = property(_get_command, _set_command, doc="""\ + The command set in the buffer's meta-data. This is usually 0 for + buffers returned by an encoder; typically this is only used by buffers + sent to the callback of a control port. + """) + + def _get_flags(self): + return self._buf[0].flags + def _set_flags(self, value): + self._buf[0].flags = value + flags = property(_get_flags, _set_flags, doc="""\ + The flags set in the buffer's meta-data, returned as a bitmapped + integer. Typical flags include: + + * ``MMAL_BUFFER_HEADER_FLAG_EOS`` -- end of stream + * ``MMAL_BUFFER_HEADER_FLAG_FRAME_START`` -- start of frame data + * ``MMAL_BUFFER_HEADER_FLAG_FRAME_END`` -- end of frame data + * ``MMAL_BUFFER_HEADER_FLAG_KEYFRAME`` -- frame is a key-frame + * ``MMAL_BUFFER_HEADER_FLAG_FRAME`` -- frame data + * ``MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO`` -- motion estimatation data + """) + + def _get_pts(self): + return self._buf[0].pts + def _set_pts(self, value): + self._buf[0].pts = value + pts = property(_get_pts, _set_pts, doc="""\ + The presentation timestamp (PTS) of the buffer, as an integer number + of microseconds or ``MMAL_TIME_UNKNOWN``. + """) + + def _get_dts(self): + return self._buf[0].dts + def _set_dts(self, value): + self._buf[0].dts = value + dts = property(_get_dts, _set_dts, doc="""\ + The decoding timestamp (DTS) of the buffer, as an integer number of + microseconds or ``MMAL_TIME_UNKNOWN``. + """) + + @property + def size(self): + """ + Returns the length of the buffer's data area in bytes. This will be + greater than or equal to :attr:`length` and is fixed in value. + """ + return self._buf[0].alloc_size + + def _get_offset(self): + return self._buf[0].offset + def _set_offset(self, value): + assert 0 <= value <= self.size + self._buf[0].offset = value + self.length = min(self.size - self.offset, self.length) + offset = property(_get_offset, _set_offset, doc="""\ + The offset from the start of the buffer at which the data actually + begins. Defaults to 0. If this is set to a value which would force the + current :attr:`length` off the end of the buffer's :attr:`size`, then + :attr:`length` will be decreased automatically. + """) + + def _get_length(self): + return self._buf[0].length + def _set_length(self, value): + assert 0 <= value <= self.size - self.offset + self._buf[0].length = value + length = property(_get_length, _set_length, doc="""\ + The length of data held in the buffer. Must be less than or equal to + the allocated size of data held in :attr:`size` minus the data + :attr:`offset`. This attribute can be used to effectively blank the + buffer by setting it to zero. 
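+
+        For example, ``buf.length = 0`` marks the buffer as containing no
+        data without releasing it or changing its allocated :attr:`size`.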
+ """) + + def _get_data(self): + with self as buf: + return ct.string_at( + ct.byref(buf, self._buf[0].offset), + self._buf[0].length) + def _set_data(self, value): + value_len = buffer_bytes(value) + if value_len: + if value_len > self.size: + raise PiCameraValueError( + 'data is too large for buffer (%d > %d)' % ( + value_len, self.size)) + bp = ct.c_uint8 * value_len + try: + sp = bp.from_buffer(value) + except TypeError: + sp = bp.from_buffer_copy(value) + with self as buf: + ct.memmove(buf, sp, value_len) + self._buf[0].offset = 0 + self._buf[0].length = value_len + data = property(_get_data, _set_data, doc="""\ + The data held in the buffer as a :class:`bytes` string. You can set + this attribute to modify the data in the buffer. Acceptable values + are anything that supports the buffer protocol, and which contains + :attr:`size` bytes or less. Setting this attribute implicitly modifies + the :attr:`length` attribute to the length of the specified value and + sets :attr:`offset` to zero. + + .. note:: + + Accessing a buffer's data via this attribute is relatively slow + (as it copies the buffer's data to/from Python objects). See the + :class:`MMALBuffer` documentation for details of a faster (but + more complex) method. + """) + + def replicate(self, source): + """ + Replicates the *source* :class:`MMALBuffer`. This copies all fields + from the *source* buffer, including the internal :attr:`data` pointer. + In other words, after replication this buffer and the *source* buffer + will share the same block of memory for *data*. + + The *source* buffer will also be referenced internally by this buffer + and will only be recycled once this buffer is released. + + .. note:: + + This is fundamentally different to the operation of the + :meth:`copy_from` method. It is much faster, but imposes the burden + that two buffers now share data (the *source* cannot be released + until the replicant has been released). + """ + mmal_check( + mmal.mmal_buffer_header_replicate(self._buf, source._buf), + prefix='unable to replicate buffer') + + def copy_from(self, source): + """ + Copies all fields (including data) from the *source* + :class:`MMALBuffer`. This buffer must have sufficient :attr:`size` to + store :attr:`length` bytes from the *source* buffer. This method + implicitly sets :attr:`offset` to zero, and :attr:`length` to the + number of bytes copied. + + .. note:: + + This is fundamentally different to the operation of the + :meth:`replicate` method. It is much slower, but afterward the + copied buffer is entirely independent of the *source*. + """ + assert self.size >= source.length + source_len = source._buf[0].length + if source_len: + with self as target_buf, source as source_buf: + ct.memmove(target_buf, ct.byref(source_buf, source.offset), source_len) + self._buf[0].offset = 0 + self._buf[0].length = source_len + self.copy_meta(source) + + def copy_meta(self, source): + """ + Copy meta-data from the *source* :class:`MMALBuffer`; specifically this + copies all buffer fields with the exception of :attr:`data`, + :attr:`length` and :attr:`offset`. + """ + self._buf[0].cmd = source._buf[0].cmd + self._buf[0].flags = source._buf[0].flags + self._buf[0].dts = source._buf[0].dts + self._buf[0].pts = source._buf[0].pts + self._buf[0].type[0] = source._buf[0].type[0] + + def acquire(self): + """ + Acquire a reference to the buffer. This will prevent the buffer from + being recycled until :meth:`release` is called. 
This method can be
+        called multiple times in which case an equivalent number of calls
+        to :meth:`release` must be made before the buffer will actually be
+        released.
+        """
+        mmal.mmal_buffer_header_acquire(self._buf)
+
+    def release(self):
+        """
+        Release a reference to the buffer. This is the opposing call to
+        :meth:`acquire`. Once all references have been released, the buffer
+        will be recycled.
+        """
+        mmal.mmal_buffer_header_release(self._buf)
+
+    def reset(self):
+        """
+        Resets all buffer header fields to default values.
+        """
+        mmal.mmal_buffer_header_reset(self._buf)
+
+    def __enter__(self):
+        mmal_check(
+            mmal.mmal_buffer_header_mem_lock(self._buf),
+            prefix='unable to lock buffer header memory')
+        return ct.cast(
+            self._buf[0].data,
+            ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents
+
+    def __exit__(self, *exc):
+        mmal.mmal_buffer_header_mem_unlock(self._buf)
+        return False
+
+    def __repr__(self):
+        if self._buf is not None:
+            return '<MMALBuffer object: flags=%s command=%s length=%d>' % (
+                ''.join((
+                    'S' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_START else '_',
+                    'E' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END else '_',
+                    'K' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else '_',
+                    'C' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else '_',
+                    'M' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else '_',
+                    'X' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS else '_',
+                )), {
+                    0: 'none',
+                    mmal.MMAL_EVENT_ERROR: 'error',
+                    mmal.MMAL_EVENT_FORMAT_CHANGED: 'format-change',
+                    mmal.MMAL_EVENT_PARAMETER_CHANGED: 'param-change',
+                    mmal.MMAL_EVENT_EOS: 'end-of-stream',
+                }[self.command], self.length)
+        else:
+            return '<MMALBuffer closed>'
+
+
+class MMALQueue(object):
+    """
+    Represents an MMAL buffer queue. Buffers can be added to the queue with the
+    :meth:`put` method, and retrieved from the queue (with optional wait
+    timeout) with the :meth:`get` method.
+    """
+    __slots__ = ('_queue', '_created')
+
+    def __init__(self, queue):
+        self._created = False
+        self._queue = queue
+
+    @classmethod
+    def create(cls):
+        self = cls(mmal.mmal_queue_create())
+        self._created = True
+        return self
+
+    def close(self):
+        if self._created:
+            mmal.mmal_queue_destroy(self._queue)
+            self._queue = None
+
+    def __len__(self):
+        return mmal.mmal_queue_length(self._queue)
+
+    def get(self, block=True, timeout=None):
+        """
+        Get the next buffer from the queue. If *block* is ``True`` (the default)
+        and *timeout* is ``None`` (the default) then the method will block
+        until a buffer is available. Otherwise *timeout* is the maximum time to
+        wait (in seconds) for a buffer to become available. If a buffer is not
+        available before the timeout expires, the method returns ``None``.
+
+        Likewise, if *block* is ``False`` and no buffer is immediately
+        available then ``None`` is returned.
+        """
+        if block and timeout is None:
+            buf = mmal.mmal_queue_wait(self._queue)
+        elif block and timeout is not None:
+            buf = mmal.mmal_queue_timedwait(self._queue, int(timeout * 1000))
+        else:
+            buf = mmal.mmal_queue_get(self._queue)
+        if buf:
+            return MMALBuffer(buf)
+
+    def put(self, buf):
+        """
+        Place :class:`MMALBuffer` *buf* at the back of the queue.
+        """
+        mmal.mmal_queue_put(self._queue, buf._buf)
+
+    def put_back(self, buf):
+        """
+        Place :class:`MMALBuffer` *buf* at the front of the queue. This is
+        used when a buffer was removed from the queue but needs to be put
+        back at the front where it was originally taken from. 
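+
+        A sketch of the intended pattern (``queue`` is an assumed
+        :class:`MMALQueue` instance; ``wanted`` is a hypothetical predicate)::
+
+            buf = queue.get(timeout=0.1)
+            if buf is not None and not wanted(buf):
+                # not ready to process this buffer yet; return it to the
+                # front of the queue so it is seen first next time
+                queue.put_back(buf)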
+ """ + mmal.mmal_queue_put_back(self._queue, buf._buf) + + +class MMALPool(object): + """ + Represents an MMAL pool containing :class:`MMALBuffer` objects. All active + ports are associated with a pool of buffers, and a queue. Instances can be + treated as a sequence of :class:`MMALBuffer` objects but this is only + recommended for debugging purposes; otherwise, use the :meth:`get_buffer`, + :meth:`send_buffer`, and :meth:`send_all_buffers` methods which work with + the encapsulated :class:`MMALQueue`. + """ + __slots__ = ('_pool', '_queue') + + def __init__(self, pool): + self._pool = pool + super(MMALPool, self).__init__() + self._queue = MMALQueue(pool[0].queue) + + def __len__(self): + return self._pool[0].headers_num + + def __getitem__(self, index): + return MMALBuffer(self._pool[0].header[index]) + + @property + def queue(self): + """ + The :class:`MMALQueue` associated with the pool. + """ + return self._queue + + def close(self): + if self._pool is not None: + mmal.mmal_pool_destroy(self._pool) + self._pool = None + + def resize(self, new_count, new_size): + """ + Resizes the pool to contain *new_count* buffers with *new_size* bytes + allocated to each buffer. + + *new_count* must be 1 or more (you cannot resize a pool to contain + no headers). However, *new_size* can be 0 which causes all payload + buffers to be released. + + .. warning:: + + If the pool is associated with a port, the port must be disabled + when resizing the pool. + """ + mmal_check( + mmal.mmal_pool_resize(self._pool, new_count, new_size), + prefix='unable to resize pool') + + def get_buffer(self, block=True, timeout=None): + """ + Get the next buffer from the pool's queue. See :meth:`MMALQueue.get` + for the meaning of the parameters. + """ + return self._queue.get(block, timeout) + + def send_buffer(self, port, block=True, timeout=None): + """ + Get a buffer from the pool's queue and send it to *port*. *block* and + *timeout* act as they do in :meth:`get_buffer`. If no buffer is + available (for the values of *block* and *timeout*, + :exc:`~picamera.PiCameraMMALError` is raised). + """ + buf = self.get_buffer(block, timeout) + if buf is None: + raise PiCameraMMALError(mmal.MMAL_EAGAIN, 'no buffers available') + port.send_buffer(buf) + + def send_all_buffers(self, port, block=True, timeout=None): + """ + Send all buffers from the queue to *port*. *block* and *timeout* act as + they do in :meth:`get_buffer`. If no buffer is available (for the + values of *block* and *timeout*, :exc:`~picamera.PiCameraMMALError` is + raised). + """ + for i in range(len(self._queue)): + self.send_buffer(port, block, timeout) + + +class MMALPortPool(MMALPool): + """ + Construct an MMAL pool for the number and size of buffers required by + the :class:`MMALPort` *port*. + """ + __slots__ = ('_port',) + + def __init__(self, port): + pool = mmal.mmal_port_pool_create( + port._port, port._port[0].buffer_num, port._port[0].buffer_size) + if not pool: + raise PiCameraMMALError( + mmal.MMAL_ENOSPC, + 'failed to create buffer header pool for port %s' % port.name) + super(MMALPortPool, self).__init__(pool) + self._port = port + + def close(self): + if self._pool is not None: + mmal.mmal_port_pool_destroy(self._port._port, self._pool) + self._port = None + self._pool = None + super(MMALPortPool, self).close() + + @property + def port(self): + return self._port + + def send_buffer(self, port=None, block=True, timeout=None): + """ + Get a buffer from the pool and send it to *port* (or the port the pool + is associated with by default). 
*block* and *timeout* act as they do in + :meth:`MMALPool.get_buffer`. + """ + if port is None: + port = self._port + super(MMALPortPool, self).send_buffer(port, block, timeout) + + def send_all_buffers(self, port=None, block=True, timeout=None): + """ + Send all buffers from the pool to *port* (or the port the pool is + associated with by default). *block* and *timeout* act as they do in + :meth:`MMALPool.get_buffer`. + """ + if port is None: + port = self._port + super(MMALPortPool, self).send_all_buffers(port, block, timeout) + + +class MMALBaseConnection(MMALObject): + """ + Abstract base class for :class:`MMALConnection` and + :class:`MMALPythonConnection`. Handles weakrefs to the source and + target ports, and format negotiation. All other connection details are + handled by the descendent classes. + """ + __slots__ = ('_source', '_target') + + default_formats = () + + compatible_opaque_formats = { + ('OPQV-single', 'OPQV-single'), + ('OPQV-dual', 'OPQV-dual'), + ('OPQV-strips', 'OPQV-strips'), + ('OPQV-dual', 'OPQV-single'), + ('OPQV-single', 'OPQV-dual'), # recent firmwares permit this + } + + def __init__( + self, source, target, formats=default_formats): + super(MMALBaseConnection, self).__init__() + if not isinstance(source, (MMALPort, MMALPythonPort)): + raise PiCameraValueError('source is not a port') + if not isinstance(target, (MMALPort, MMALPythonPort)): + raise PiCameraValueError('target is not a port') + if source.type != mmal.MMAL_PORT_TYPE_OUTPUT: + raise PiCameraValueError('source is not an output port') + if target.type != mmal.MMAL_PORT_TYPE_INPUT: + raise PiCameraValueError('target is not an input port') + if source.connection is not None: + raise PiCameraValueError('source port is already connected') + if target.connection is not None: + raise PiCameraValueError('target port is already connected') + if formats is None: + formats = () + self._source = source + self._target = target + try: + iter(formats) + except TypeError: + formats = (formats,) + self._negotiate_format(formats) + source._connection = self + target._connection = self + # Descendents continue with connection implementation... + + def close(self): + if self._source is not None: + self._source._connection = None + self._source = None + if self._target is not None: + self._target._connection = None + self._target = None + + def _negotiate_format(self, formats): + + def copy_format(): + self._source.commit() + self._target.copy_from(self._source) + self._target.commit() + + def max_buffers(): + self._source.buffer_count = self._target.buffer_count = max( + self._source.buffer_count, self._target.buffer_count) + self._source.buffer_size = self._target.buffer_size = max( + self._source.buffer_size, self._target.buffer_size) + + # Filter out formats that aren't supported on both source and target + # ports. This is a little tricky as ports that support OPAQUE never + # claim they do (so we have to assume it's mutually supported) + mutually_supported = ( + set(self._source.supported_formats) & + set(self._target.supported_formats) + ) | {mmal.MMAL_ENCODING_OPAQUE} + formats = [f for f in formats if f in mutually_supported] + + if formats: + # If there are any formats left to try, perform the negotiation + # with the filtered list. 
Again, there's some special casing to + # deal with the incompatible OPAQUE sub-formats + for f in formats: + if f == mmal.MMAL_ENCODING_OPAQUE: + if (self._source.opaque_subformat, + self._target.opaque_subformat) in self.compatible_opaque_formats: + self._source.format = mmal.MMAL_ENCODING_OPAQUE + else: + continue + else: + self._source.format = f + try: + copy_format() + except PiCameraMMALError as e: + if e.status != mmal.MMAL_EINVAL: + raise + continue + else: + max_buffers() + return + raise PiCameraMMALError( + mmal.MMAL_EINVAL, 'failed to negotiate port format') + else: + # If no formats are available to try (either from filtering or + # because none were given), assume the source port is set up + # properly. Just copy the format to the target and hope the caller + # knows what they're doing + try: + copy_format() + except PiCameraMMALError as e: + if e.status != mmal.MMAL_EINVAL: + raise + raise PiCameraMMALError( + mmal.MMAL_EINVAL, 'failed to copy source format to target port') + else: + max_buffers() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + @property + def source(self): + """ + The source :class:`MMALPort` or :class:`MMALPythonPort` of the + connection. + """ + return self._source + + @property + def target(self): + """ + The target :class:`MMALPort` or :class:`MMALPythonPort` of the + connection. + """ + return self._target + + +class MMALConnection(MMALBaseConnection): + """ + Represents an MMAL internal connection between two components. The + constructor accepts arguments providing the *source* :class:`MMALPort` and + *target* :class:`MMALPort`. + + The *formats* parameter specifies an iterable of formats (in preference + order) that the connection may attempt when negotiating formats between + the two ports. If this is ``None``, or an empty iterable, no negotiation + will take place and the source port's format will simply be copied to the + target port. Otherwise, the iterable will be worked through in order until + a format acceptable to both ports is discovered. + + .. note:: + + The default *formats* list starts with OPAQUE; the class understands + the different OPAQUE sub-formats (see :ref:`mmal` for more information) + and will only select OPAQUE if compatible sub-formats can be used on + both ports. + + The *callback* parameter can optionally specify a callable which will be + executed for each buffer that traverses the connection (providing an + opportunity to manipulate or drop that buffer). If specified, it must be a + callable which accepts two parameters: the :class:`MMALConnection` object + sending the data, and the :class:`MMALBuffer` object containing data. The + callable may optionally manipulate the :class:`MMALBuffer` and return it + to permit it to continue traversing the connection, or return ``None`` + in which case the buffer will be released. + + .. note:: + + There is a significant performance penalty for specifying a + callback between MMAL components as it requires buffers to be + copied from the GPU's memory to the CPU's memory and back again. + + .. data:: default_formats + :annotation: = (MMAL_ENCODING_OPAQUE, MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA) + + Class attribute defining the default formats used to negotiate + connections between MMAL components. 
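+
+    A minimal usage sketch (``camera`` and ``encoder`` are assumed to be
+    already-configured :class:`MMALCamera` and :class:`MMALImageEncoder`
+    instances)::
+
+        connection = MMALConnection(camera.outputs[2], encoder.inputs[0])
+        connection.enable()
+        # ... capture runs here ...
+        connection.disable()
+        connection.close()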
+ """ + __slots__ = ('_connection', '_callback', '_wrapper') + + default_formats = ( + mmal.MMAL_ENCODING_OPAQUE, + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_BGR24, + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + ) + + def __init__( + self, source, target, formats=default_formats, callback=None): + if not isinstance(source, MMALPort): + raise PiCameraValueError('source is not an MMAL port') + if not isinstance(target, MMALPort): + raise PiCameraValueError('target is not an MMAL port') + super(MMALConnection, self).__init__(source, target, formats) + self._connection = ct.POINTER(mmal.MMAL_CONNECTION_T)() + self._callback = callback + flags = mmal.MMAL_CONNECTION_FLAG_ALLOCATION_ON_INPUT + if callback is None: + flags |= mmal.MMAL_CONNECTION_FLAG_TUNNELLING + try: + mmal_check( + mmal.mmal_connection_create( + self._connection, source._port, target._port, flags), + prefix="Failed to create connection") + except: + self._connection = None + raise + + def close(self): + if self._connection is not None: + mmal.mmal_connection_destroy(self._connection) + self._connection = None + self._wrapper = None + super(MMALConnection, self).close() + + @property + def enabled(self): + """ + Returns ``True`` if the connection is enabled. Use :meth:`enable` + and :meth:`disable` to control the state of the connection. + """ + return bool(self._connection[0].is_enabled) + + def enable(self): + """ + Enable the connection. When a connection is enabled, data is + continually transferred from the output port of the source to the input + port of the target component. + """ + def wrapper(connection): + buf = mmal.mmal_queue_get(connection[0].queue) + if buf: + buf = MMALBuffer(buf) + try: + modified_buf = self._callback(self, buf) + except: + buf.release() + raise + else: + if modified_buf is not None: + try: + self._target.send_buffer(modified_buf) + except PiCameraPortDisabled: + # Target port disabled; ignore the error + pass + else: + buf.release() + return + buf = mmal.mmal_queue_get(connection[0].pool[0].queue) + if buf: + buf = MMALBuffer(buf) + try: + self._source.send_buffer(buf) + except PiCameraPortDisabled: + # Source port has been disabled; ignore the error + pass + + if self._callback is not None: + self._wrapper = mmal.MMAL_CONNECTION_CALLBACK_T(wrapper) + self._connection[0].callback = self._wrapper + self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True + self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True + mmal_check( + mmal.mmal_connection_enable(self._connection), + prefix="Failed to enable connection") + if self._callback is not None: + MMALPool(self._connection[0].pool).send_all_buffers(self._source) + + def disable(self): + """ + Disables the connection. + """ + mmal_check( + mmal.mmal_connection_disable(self._connection), + prefix="Failed to disable connection") + self._wrapper = None + + @property + def name(self): + return self._connection[0].name.decode('ascii') + + def __repr__(self): + if self._connection is not None: + return '' % self.name + else: + return '' + + +class MMALRawCamera(MMALBaseComponent): + """ + The MMAL "raw camera" component. + + Don't use this! If you insist on using this anyway, read the forum post + about `raw sensor access`_ first. + + .. 
raw sensor access: https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=109137 + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_RAW_CAMERA + opaque_input_subformats = () + opaque_output_subformats = ('OPQV-single',) + + +class MMALCamera(MMALBaseComponent): + """ + Represents the MMAL camera component. This component has 0 input ports and + 3 output ports. The intended use of the output ports (which in turn + determines the behaviour of those ports) is as follows: + + * Port 0 is intended for preview renderers + + * Port 1 is intended for video recording + + * Port 2 is intended for still image capture + + Use the ``MMAL_PARAMETER_CAMERA_CONFIG`` parameter on the control port to + obtain and manipulate the camera's configuration. + """ + __slots__ = () + + component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA + opaque_output_subformats = ('OPQV-single', 'OPQV-dual', 'OPQV-strips') + + annotate_structs = ( + mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_T, + mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V2_T, + mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V3_T, + ) + + def __init__(self): + global FIX_RGB_BGR_ORDER + super(MMALCamera, self).__init__() + if PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] is None: + found = False + # try largest struct to smallest as later firmwares still happily + # accept earlier revision structures + # XXX do old firmwares reject too-large structs? + for struct in reversed(MMALCamera.annotate_structs): + try: + PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = struct + self.control.params[mmal.MMAL_PARAMETER_ANNOTATE] + except PiCameraMMALError: + pass + else: + found = True + break + if not found: + PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = None + raise PiCameraMMALError( + mmal.MMAL_EINVAL, "unknown camera annotation structure revision") + if FIX_RGB_BGR_ORDER is None: + # old firmware lists BGR24 before RGB24 in supported_formats + for f in self.outputs[1].supported_formats: + if f == mmal.MMAL_ENCODING_BGR24: + FIX_RGB_BGR_ORDER = True + break + elif f == mmal.MMAL_ENCODING_RGB24: + FIX_RGB_BGR_ORDER = False + break + + def _get_annotate_rev(self): + try: + return MMALCamera.annotate_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE]) + 1 + except IndexError: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, "unknown camera annotation structure revision") + def _set_annotate_rev(self, value): + try: + PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = MMALCamera.annotate_structs[value - 1] + except IndexError: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, "invalid camera annotation structure revision") + annotate_rev = property(_get_annotate_rev, _set_annotate_rev, doc="""\ + The annotation capabilities of the firmware have evolved over time and + several structures are available for querying and setting video + annotations. By default the :class:`MMALCamera` class will pick the + latest annotation structure supported by the current firmware but you + can select older revisions with :attr:`annotate_rev` for other purposes + (e.g. testing). + """) + + +class MMALCameraInfo(MMALBaseComponent): + """ + Represents the MMAL camera-info component. Query the + ``MMAL_PARAMETER_CAMERA_INFO`` parameter on the control port to obtain + information about the connected camera module. 
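+
+    For example (a sketch; the fields available depend on the structure
+    revision selected by :attr:`info_rev`)::
+
+        with MMALCameraInfo() as info:
+            info_struct = info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
+            print(info_struct.num_cameras)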
+ """ + __slots__ = () + + component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA_INFO + + info_structs = ( + mmal.MMAL_PARAMETER_CAMERA_INFO_T, + mmal.MMAL_PARAMETER_CAMERA_INFO_V2_T, + ) + + def __init__(self): + super(MMALCameraInfo, self).__init__() + if PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] is None: + found = False + # try smallest structure to largest as later firmwares reject + # older structures + for struct in MMALCameraInfo.info_structs: + try: + PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = struct + self.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO] + except PiCameraMMALError: + pass + else: + found = True + break + if not found: + PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = None + raise PiCameraMMALError( + mmal.MMAL_EINVAL, "unknown camera info structure revision") + + def _get_info_rev(self): + try: + return MMALCameraInfo.info_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO]) + 1 + except IndexError: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, "unknown camera info structure revision") + def _set_info_rev(self, value): + try: + PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = MMALCameraInfo.info_structs[value - 1] + except IndexError: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, "invalid camera info structure revision") + info_rev = property(_get_info_rev, _set_info_rev, doc="""\ + The camera information capabilities of the firmware have evolved over + time and several structures are available for querying camera + information. When initialized, :class:`MMALCameraInfo` will attempt + to discover which structure is in use by the extant firmware. This + property can be used to discover the structure version and to modify + the version in use for other purposes (e.g. testing). + """) + + +class MMALComponent(MMALBaseComponent): + """ + Represents an MMAL component that acts as a filter of some sort, with a + single input that connects to an upstream source port. This is an asbtract + base class. + """ + __slots__ = () + + def __init__(self): + super(MMALComponent, self).__init__() + assert len(self.opaque_input_subformats) == 1 + + def close(self): + self.disconnect() + super(MMALComponent, self).close() + + def enable(self): + super(MMALComponent, self).enable() + if self.connection is not None: + self.connection.enable() + + def disable(self): + if self.connection is not None: + self.connection.disable() + super(MMALComponent, self).disable() + + def connect(self, source, **options): + """ + Connects the input port of this component to the specified *source* + :class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a + convenience (primarily intended for command line experimentation; don't + use this in scripts), *source* can be another component in which case + the first unconnected output port will be selected as *source*. + + Keyword arguments will be passed along to the connection constructor. + See :class:`MMALConnection` and :class:`MMALPythonConnection` for + further information. + """ + if isinstance(source, (MMALPort, MMALPythonPort)): + return self.inputs[0].connect(source) + else: + for port in source.outputs: + if not port.connection: + return self.inputs[0].connect(port, **options) + raise PiCameraMMALError( + mmal.MMAL_EINVAL, 'no free output ports on %r' % source) + + def disconnect(self): + """ + Destroy the connection between this component's input port and the + upstream component. 
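+
+        A sketch of the usual connect/disconnect pairing (``splitter`` and
+        ``camera`` are assumed, already-configured components)::
+
+            splitter.connect(camera.outputs[0])
+            splitter.enable()
+            # ... process buffers ...
+            splitter.disable()
+            splitter.disconnect()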
+ """ + self.inputs[0].disconnect() + + @property + def connection(self): + """ + The :class:`MMALConnection` or :class:`MMALPythonConnection` object + linking this component to the upstream component. + """ + return self.inputs[0].connection + + +class MMALSplitter(MMALComponent): + """ + Represents the MMAL splitter component. This component has 1 input port + and 4 output ports which all generate duplicates of buffers passed to the + input port. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_SPLITTER + opaque_input_subformats = ('OPQV-single',) + opaque_output_subformats = ('OPQV-single',) * 4 + + +class MMALISPResizer(MMALComponent): + """ + Represents the MMAL ISP resizer component. This component has 1 input port + and 1 output port, and supports resizing via the VideoCore ISP, along with + conversion of numerous formats into numerous other formats (e.g. OPAQUE to + RGB, etc). This is more efficient than :class:`MMALResizer` but is only + available on later firmware versions. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_ISP + opaque_input_subformats = ('OPQV-single',) + opaque_output_subformats = (None,) + + +class MMALResizer(MMALComponent): + """ + Represents the MMAL VPU resizer component. This component has 1 input port + and 1 output port. This supports resizing via the VPU. This is not as + efficient as :class:`MMALISPResizer` but is available on all firmware + verions. The output port can (and usually should) have a different frame + size to the input port. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_RESIZER + opaque_input_subformats = (None,) + opaque_output_subformats = (None,) + + +class MMALEncoder(MMALComponent): + """ + Represents a generic MMAL encoder. This is an abstract base class. + """ + __slots__ = () + + +class MMALVideoEncoder(MMALEncoder): + """ + Represents the MMAL video encoder component. This component has 1 input + port and 1 output port. The output port is usually configured with + ``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER + opaque_input_subformats = ('OPQV-dual',) + opaque_output_subformats = (None,) + + +class MMALImageEncoder(MMALEncoder): + """ + Represents the MMAL image encoder component. This component has 1 input + port and 1 output port. The output port is typically configured with + ``MMAL_ENCODING_JPEG`` but can also use ``MMAL_ENCODING_PNG``, + ``MMAL_ENCODING_GIF``, etc. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER + opaque_input_subformats = ('OPQV-strips',) + opaque_output_subformats = (None,) + + +class MMALDecoder(MMALComponent): + """ + Represents a generic MMAL decoder. This is an abstract base class. + """ + __slots__ = () + + +class MMALVideoDecoder(MMALDecoder): + """ + Represents the MMAL video decoder component. This component has 1 input + port and 1 output port. The input port is usually configured with + ``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_DECODER + opaque_input_subformats = (None,) + opaque_output_subformats = ('OPQV-single',) + + +class MMALImageDecoder(MMALDecoder): + """ + Represents the MMAL iamge decoder component. This component has 1 input + port and 1 output port. The input port is usually configured with + ``MMAL_ENCODING_JPEG``. 
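+
+    A configuration sketch (the format chosen here is illustrative only)::
+
+        decoder = MMALImageDecoder()
+        decoder.inputs[0].format = mmal.MMAL_ENCODING_JPEG
+        decoder.inputs[0].commit()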
+ """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_DECODER + opaque_input_subformats = (None,) + opaque_output_subformats = ('OPQV-single',) + + +class MMALRenderer(MMALComponent): + """ + Represents the MMAL renderer component. This component has 1 input port and + 0 output ports. It is used to implement the camera preview and overlays. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_RENDERER + opaque_input_subformats = ('OPQV-single',) + + +class MMALNullSink(MMALComponent): + """ + Represents the MMAL null-sink component. This component has 1 input port + and 0 output ports. It is used to keep the preview port "alive" (and thus + calculating white-balance and exposure) when the camera preview is not + required. + """ + __slots__ = () + component_type = mmal.MMAL_COMPONENT_DEFAULT_NULL_SINK + opaque_input_subformats = ('OPQV-single',) + + +class MMALPythonPort(MMALObject): + """ + Implements ports for Python-based MMAL components. + """ + __slots__ = ( + '_buffer_count', + '_buffer_size', + '_connection', + '_enabled', + '_owner', + '_pool', + '_type', + '_index', + '_supported_formats', + '_format', + '_callback', + ) + + _FORMAT_BPP = { + 'I420': 1.5, + 'RGB3': 3, + 'RGBA': 4, + 'BGR3': 3, + 'BGRA': 4, + } + + def __init__(self, owner, port_type, index): + self._buffer_count = 2 + self._buffer_size = 0 + self._connection = None + self._enabled = False + self._owner = weakref.ref(owner) + self._pool = None + self._callback = None + self._type = port_type + self._index = index + self._supported_formats = { + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_BGR24, + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + } + self._format = ct.pointer(mmal.MMAL_ES_FORMAT_T( + type=mmal.MMAL_ES_TYPE_VIDEO, + encoding=mmal.MMAL_ENCODING_I420, + es=ct.pointer(mmal.MMAL_ES_SPECIFIC_FORMAT_T()))) + + def close(self): + self.disconnect() + self.disable() + self._format = None + + def __repr__(self): + return '' % ( + self.name, mmal.FOURCC_str(self.format), self.buffer_count, + self.buffer_size, self.framesize, self.framerate) + + def _get_bitrate(self): + return self._format[0].bitrate + def _set_bitrate(self, value): + self._format[0].bitrate = value + bitrate = property(_get_bitrate, _set_bitrate, doc="""\ + Retrieves or sets the bitrate limit for the port's format. + """) + + def _get_supported_formats(self): + return self._supported_formats + def _set_supported_formats(self, value): + try: + value = {f for f in value} + except TypeError: + value = {value} + if not value: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, "port must have at least one valid format") + self._supported_formats = value + supported_formats = property(_get_supported_formats, _set_supported_formats, doc="""\ + Retrieves or sets the set of valid formats for this port. The set must + always contain at least one valid format. A single format can be + specified; it will be converted implicitly to a singleton set. + + If the current port :attr:`format` is not a member of the new set, no + error is raised. An error will be raised when :meth:`commit` is next + called if :attr:`format` is still not a member of the set. + """) + + def _get_format(self): + return self._format[0].encoding + def _set_format(self, value): + self._format[0].encoding = value + format = property(_get_format, _set_format, doc="""\ + Retrieves or sets the encoding format of the port. 
Setting this + attribute implicitly sets the encoding variant to a sensible value + (I420 in the case of OPAQUE). + """) + + def _get_framesize(self): + return PiResolution( + self._format[0].es[0].video.crop.width, + self._format[0].es[0].video.crop.height, + ) + def _set_framesize(self, value): + value = to_resolution(value) + video = self._format[0].es[0].video + video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32) + video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16) + video.crop.width = value.width + video.crop.height = value.height + framesize = property(_get_framesize, _set_framesize, doc="""\ + Retrieves or sets the size of the source's video frames as a (width, + height) tuple. This attribute implicitly handles scaling the given + size up to the block size of the camera (32x16). + """) + + def _get_framerate(self): + video = self._format[0].es[0].video + try: + return Fraction( + video.frame_rate.num, + video.frame_rate.den) + except ZeroDivisionError: + return Fraction(0, 1) + def _set_framerate(self, value): + value = to_fraction(value) + video = self._format[0].es[0].video + video.frame_rate.num = value.numerator + video.frame_rate.den = value.denominator + framerate = property(_get_framerate, _set_framerate, doc="""\ + Retrieves or sets the framerate of the port's video frames in fps. + """) + + @property + def pool(self): + """ + Returns the :class:`MMALPool` associated with the buffer, if any. + """ + return self._pool + + @property + def opaque_subformat(self): + return None + + def _get_buffer_count(self): + return self._buffer_count + def _set_buffer_count(self, value): + if value < 1: + raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1') + self._buffer_count = int(value) + buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\ + The number of buffers allocated (or to be allocated) to the port. The + default is 2 but more may be required in the case of long pipelines + with replicated buffers. + """) + + def _get_buffer_size(self): + return self._buffer_size + def _set_buffer_size(self, value): + if value < 0: + raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0') + self._buffer_size = value + buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\ + The size of buffers allocated (or to be allocated) to the port. The + size of buffers defaults to a value dictated by the port's format. + """) + + def copy_from(self, source): + """ + Copies the port's :attr:`format` from the *source* + :class:`MMALControlPort`. + """ + if isinstance(source, MMALPythonPort): + mmal.mmal_format_copy(self._format, source._format) + else: + mmal.mmal_format_copy(self._format, source._port[0].format) + + def commit(self): + """ + Commits the port's configuration and automatically updates the number + and size of associated buffers. This is typically called after + adjusting the port's format and/or associated settings (like width and + height for video ports). 
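As a rough sketch of that sequence (the component name and values here are purely illustrative): assign :attr:`format`, :attr:`framesize` and :attr:`framerate`, then call :meth:`commit` so the buffer size is recalculated for the unencoded formats the port knows about::

    from picamera import mmal, mmalobj as mo

    comp = mo.MMALPythonComponent(name='py.example')
    port = comp.inputs[0]
    port.format = mmal.MMAL_ENCODING_RGB24
    port.framesize = (640, 480)   # already aligned to the 32x16 block size
    port.framerate = 30
    port.commit()                 # buffer_size becomes 640 * 480 * 3 bytes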
+ """ + if self.format not in self.supported_formats: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, 'invalid format for port %r' % self) + self._buffer_count = 2 + video = self._format[0].es[0].video + try: + self._buffer_size = int( + MMALPythonPort._FORMAT_BPP[str(self.format)] + * video.width + * video.height) + except KeyError: + # If it's an unknown / encoded format just leave the buffer size + # alone and hope the owning component knows what to set + pass + self._owner()._commit_port(self) + + @property + def enabled(self): + """ + Returns a :class:`bool` indicating whether the port is currently + enabled. Unlike other classes, this is a read-only property. Use + :meth:`enable` and :meth:`disable` to modify the value. + """ + return self._enabled + + def enable(self, callback=None): + """ + Enable the port with the specified callback function (this must be + ``None`` for connected ports, and a callable for disconnected ports). + + The callback function must accept two parameters which will be this + :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer` + instance. Any return value will be ignored. + """ + if self._connection is not None: + if callback is not None: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, + 'connected ports must be enabled without callback') + else: + if callback is None: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, + 'unconnected ports must be enabled with callback') + if self.type == mmal.MMAL_PORT_TYPE_INPUT or self._connection is None: + self._pool = MMALPythonPortPool(self) + self._callback = callback + self._enabled = True + + def disable(self): + """ + Disable the port. + """ + self._enabled = False + if self._pool is not None: + # Release any unprocessed buffers from the owner's queue before + # we destroy them all + while True: + buf = self._owner()._queue.get(False) + if buf: + buf.release() + else: + break + self._pool.close() + self._pool = None + self._callback = None + + def get_buffer(self, block=True, timeout=None): + """ + Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block* + and *timeout* act as they do in the corresponding + :meth:`MMALPool.get_buffer`. + """ + if not self._enabled: + raise PiCameraPortDisabled( + 'cannot get buffer from disabled port %s' % self.name) + if self._pool is not None: + # Unconnected port or input port case; retrieve buffer from the + # allocated pool + return self._pool.get_buffer(block, timeout) + else: + # Connected output port case; get a buffer from the target input + # port (in this case the port is just a thin proxy for the + # corresponding input port) + assert self.type == mmal.MMAL_PORT_TYPE_OUTPUT + return self._connection.target.get_buffer(block, timeout) + + def send_buffer(self, buf): + """ + Send :class:`MMALBuffer` *buf* to the port. + """ + # NOTE: The MMALPythonConnection callback must occur *before* the test + # for the port being enabled; it's meant to be the connection making + # the callback prior to the buffer getting to the port after all + if ( + self.type == mmal.MMAL_PORT_TYPE_INPUT and + self._connection._callback is not None): + try: + modified_buf = self._connection._callback(self._connection, buf) + except: + buf.release() + raise + else: + if modified_buf is None: + buf.release() + else: + buf = modified_buf + if not self._enabled: + raise PiCameraPortDisabled( + 'cannot send buffer to disabled port %s' % self.name) + if self._callback is not None: + # but what about output ports? + try: + # XXX Return value? 
If it's an input port we should ignore it, + self._callback(self, buf) + except: + buf.release() + raise + if self._type == mmal.MMAL_PORT_TYPE_INPUT: + # Input port case; queue the buffer for processing on the + # owning component + self._owner()._queue.put(buf) + elif self._connection is None: + # Unconnected output port case; release the buffer back to the + # pool + buf.release() + else: + # Connected output port case; forward the buffer to the + # connected component's input port + # XXX If it's a format-change event? + self._connection.target.send_buffer(buf) + + @property + def name(self): + return '%s:%s:%d' % (self._owner().name, { + mmal.MMAL_PORT_TYPE_OUTPUT: 'out', + mmal.MMAL_PORT_TYPE_INPUT: 'in', + mmal.MMAL_PORT_TYPE_CONTROL: 'control', + mmal.MMAL_PORT_TYPE_CLOCK: 'clock', + }[self.type], self._index) + + @property + def type(self): + """ + The type of the port. One of: + + * MMAL_PORT_TYPE_OUTPUT + * MMAL_PORT_TYPE_INPUT + * MMAL_PORT_TYPE_CONTROL + * MMAL_PORT_TYPE_CLOCK + """ + return self._type + + @property + def capabilities(self): + """ + The capabilities of the port. A bitfield of the following: + + * MMAL_PORT_CAPABILITY_PASSTHROUGH + * MMAL_PORT_CAPABILITY_ALLOCATION + * MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE + """ + return mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE + + @property + def index(self): + """ + Returns an integer indicating the port's position within its owning + list (inputs, outputs, etc.) + """ + return self._index + + @property + def connection(self): + """ + If this port is connected to another, this property holds the + :class:`MMALConnection` or :class:`MMALPythonConnection` object which + represents that connection. If this port is not connected, this + property is ``None``. + """ + return self._connection + + def connect(self, other, **options): + """ + Connect this port to the *other* :class:`MMALPort` (or + :class:`MMALPythonPort`). The type and configuration of the connection + will be automatically selected. + + Various connection options can be specified as keyword arguments. These + will be passed onto the :class:`MMALConnection` or + :class:`MMALPythonConnection` constructor that is called (see those + classes for an explanation of the available options). + """ + # Always construct connections from the output end + if self.type != mmal.MMAL_PORT_TYPE_OUTPUT: + return other.connect(self, **options) + if other.type != mmal.MMAL_PORT_TYPE_INPUT: + raise PiCameraValueError( + 'A connection can only be established between an output and ' + 'an input port') + return MMALPythonConnection(self, other, **options) + + def disconnect(self): + """ + Destroy the connection between this port and another port. + """ + if self.connection is not None: + self.connection.close() + + +class MMALPythonPortPool(MMALPool): + """ + Creates a pool of buffer headers for an :class:`MMALPythonPort`. This is + only used when a fake port is used without a corresponding + :class:`MMALPythonConnection`. + """ + __slots__ = ('_port',) + + def __init__(self, port): + super(MMALPythonPortPool, self).__init__( + mmal.mmal_pool_create(port.buffer_count, port.buffer_size)) + self._port = port + + @property + def port(self): + return self._port + + def send_buffer(self, port=None, block=True, timeout=None): + """ + Get a buffer from the pool and send it to *port* (or the port the pool + is associated with by default). *block* and *timeout* act as they do in + :meth:`MMALPool.get_buffer`. 
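The buffer-handling pattern for an unconnected input port is the one :class:`PiOverlayRenderer` uses later in this patch: enable the port with a callback, pull a buffer with :meth:`get_buffer`, fill it, and hand it back with :meth:`send_buffer`. A minimal sketch, where ``renderer`` is assumed to be an enabled :class:`MMALRenderer` whose input format has already been committed, and ``frame_data`` is bytes in that format::

    # Returning True stops mmalobj automatically recycling buffers back to
    # the port (PiOverlayRenderer below relies on the same trick)
    renderer.inputs[0].enable(callback=lambda port, buf: True)

    buf = renderer.inputs[0].get_buffer()
    buf.data = frame_data
    renderer.inputs[0].send_buffer(buf)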
+ """ + if port is None: + port = self._port + super(MMALPythonPortPool, self).send_buffer(port, block, timeout) + + def send_all_buffers(self, port=None, block=True, timeout=None): + """ + Send all buffers from the pool to *port* (or the port the pool is + associated with by default). *block* and *timeout* act as they do in + :meth:`MMALPool.get_buffer`. + """ + if port is None: + port = self._port + super(MMALPythonPortPool, self).send_all_buffers(port, block, timeout) + + +class MMALPythonBaseComponent(MMALObject): + """ + Base class for Python-implemented MMAL components. This class provides the + :meth:`_commit_port` method used by descendents to control their ports' + behaviour, and the :attr:`enabled` property. However, it is unlikely that + users will want to sub-class this directly. See + :class:`MMALPythonComponent` for a more useful starting point. + """ + __slots__ = ('_inputs', '_outputs', '_enabled',) + + def __init__(self): + super(MMALPythonBaseComponent, self).__init__() + self._enabled = False + self._inputs = () + self._outputs = () + # TODO Control port? + + def close(self): + """ + Close the component and release all its resources. After this is + called, most methods will raise exceptions if called. + """ + self.disable() + + @property + def enabled(self): + """ + Returns ``True`` if the component is currently enabled. Use + :meth:`enable` and :meth:`disable` to control the component's state. + """ + return self._enabled + + def enable(self): + """ + Enable the component. When a component is enabled it will process data + sent to its input port(s), sending the results to buffers on its output + port(s). Components may be implicitly enabled by connections. + """ + self._enabled = True + + def disable(self): + """ + Disables the component. + """ + self._enabled = False + + @property + def control(self): + """ + The :class:`MMALControlPort` control port of the component which can be + used to configure most aspects of the component's behaviour. + """ + return None + + @property + def inputs(self): + """ + A sequence of :class:`MMALPort` objects representing the inputs + of the component. + """ + return self._inputs + + @property + def outputs(self): + """ + A sequence of :class:`MMALPort` objects representing the outputs + of the component. + """ + return self._outputs + + def _commit_port(self, port): + """ + Called by ports when their format is committed. Descendents may + override this to reconfigure output ports when input ports are + committed, or to raise errors if the new port configuration is + unacceptable. + + .. warning:: + + This method must *not* reconfigure input ports when called; however + it can reconfigure *output* ports when input ports are committed. + """ + pass + + def __repr__(self): + if self._outputs: + return '<%s "%s": %d inputs %d outputs>' % ( + self.__class__.__name__, self.name, + len(self.inputs), len(self.outputs)) + else: + return '<%s closed>' % self.__class__.__name__ + + +class MMALPythonSource(MMALPythonBaseComponent): + """ + Provides a source for other :class:`MMALComponent` instances. The + specified *input* is read in chunks the size of the configured output + buffer(s) until the input is exhausted. The :meth:`wait` method can be + used to block until this occurs. If the output buffer is configured to + use a full-frame unencoded format (like I420 or RGB), frame-end flags will + be automatically generated by the source. When the input is exhausted an + empty buffer with the End Of Stream (EOS) flag will be sent. 
+ + The component provides all picamera's usual IO-handling characteristics; if + *input* is a string, a file with that name will be opened as the input and + closed implicitly when the component is closed. Otherwise, the input will + not be closed implicitly (the component did not open it, so the assumption + is that closing *input* is the caller's responsibility). If *input* is an + object with a ``read`` method it is assumed to be a file-like object and is + used as is. Otherwise, *input* is assumed to be a readable object + supporting the buffer protocol (which is wrapped in a :class:`BufferIO` + stream). + """ + __slots__ = ('_stream', '_opened', '_thread') + + def __init__(self, input): + super(MMALPythonSource, self).__init__() + self._inputs = () + self._outputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, 0),) + self._stream, self._opened = open_stream(input, output=False) + self._thread = None + + def close(self): + super(MMALPythonSource, self).close() + if self._outputs: + self._outputs[0].close() + self._outputs = () + if self._stream: + close_stream(self._stream, self._opened) + self._stream = None + + def enable(self): + super(MMALPythonSource, self).enable() + self._thread = Thread(target=self._send_run) + self._thread.daemon = True + self._thread.start() + + def disable(self): + super(MMALPythonSource, self).disable() + if self._thread: + self._thread.join() + self._thread = None + + def wait(self, timeout=None): + """ + Wait for the source to send all bytes from the specified input. If + *timeout* is specified, it is the number of seconds to wait for + completion. The method returns ``True`` if the source completed within + the specified timeout and ``False`` otherwise. + """ + if not self.enabled: + raise PiCameraMMALError( + mmal.MMAL_EINVAL, 'cannot wait on disabled component') + self._thread.join(timeout) + return not self._thread.is_alive() + + def _send_run(self): + # Calculate the size of a frame if possible (i.e. when the output + # format is an unencoded full frame format). If it's an unknown / + # encoded format, we've no idea what the framesize is (this would + # presumably require decoding the stream) so leave framesize as None. + video = self._outputs[0]._format[0].es[0].video + try: + framesize = ( + MMALPythonPort._FORMAT_BPP[str(self._outputs[0].format)] + * video.width + * video.height) + except KeyError: + framesize = None + frameleft = framesize + while self.enabled: + buf = self._outputs[0].get_buffer(timeout=0.1) + if buf: + try: + if frameleft is None: + send = buf.size + else: + send = min(frameleft, buf.size) + with buf as data: + if send == buf.size: + try: + # readinto() is by far the fastest method of + # getting data into the buffer + buf.length = self._stream.readinto(data) + except AttributeError: + # if there's no readinto() method, fallback on + # read() and the data setter (memmove) + buf.data = self._stream.read(buf.size) + else: + buf.data = self._stream.read(send) + if frameleft is not None: + frameleft -= buf.length + if not frameleft: + buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END + frameleft = framesize + if not buf.length: + buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_EOS + break + finally: + self._outputs[0].send_buffer(buf) + + @property + def name(self): + return 'py.source' + + +class MMALPythonComponent(MMALPythonBaseComponent): + """ + Provides a Python-based MMAL component with a *name*, a single input and + the specified number of *outputs* (default 1). 
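A rough end-to-end sketch of the source in use (all sizes and names are illustrative; format negotiation is handled by the connection classes defined further down): push a single raw RGB frame from an in-memory stream into a :class:`MMALPythonTarget` and wait for the EOS flag::

    import io
    from picamera import mmal, mmalobj as mo

    frame = b'\x00' * (64 * 64 * 3)               # one black 64x64 RGB frame
    source = mo.MMALPythonSource(io.BytesIO(frame))
    source.outputs[0].format = mmal.MMAL_ENCODING_RGB24
    source.outputs[0].framesize = (64, 64)
    source.outputs[0].commit()

    target = mo.MMALPythonTarget(io.BytesIO())
    conn = source.outputs[0].connect(
        target.inputs[0], formats=(mmal.MMAL_ENCODING_RGB24,))
    conn.enable()
    target.enable()
    source.enable()
    source.wait(5)      # True once the input stream is exhausted
    target.wait(5)      # True once the EOS buffer reaches the target
    conn.close()
    target.close()
    source.close()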
The :meth:`connect` and + :meth:`disconnect` methods can be used to establish or break a connection + from the input port to an upstream component. + + Typically descendents will override the :meth:`_handle_frame` method to + respond to buffers sent to the input port, and will set + :attr:`MMALPythonPort.supported_formats` in the constructor to define the + formats that the component will work with. + """ + __slots__ = ('_name', '_thread', '_queue', '_error') + + def __init__(self, name='py.component', outputs=1): + super(MMALPythonComponent, self).__init__() + self._name = name + self._thread = None + self._error = None + self._queue = MMALQueue.create() + self._inputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_INPUT, 0),) + self._outputs = tuple( + MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, n) + for n in range(outputs) + ) + + def close(self): + super(MMALPythonComponent, self).close() + self.disconnect() + if self._inputs: + self._inputs[0].close() + self._inputs = () + for output in self._outputs: + output.disable() + self._outputs = () + self._queue.close() + self._queue = None + + def connect(self, source, **options): + """ + Connects the input port of this component to the specified *source* + :class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a + convenience (primarily intended for command line experimentation; don't + use this in scripts), *source* can be another component in which case + the first unconnected output port will be selected as *source*. + + Keyword arguments will be passed along to the connection constructor. + See :class:`MMALConnection` and :class:`MMALPythonConnection` for + further information. + """ + if isinstance(source, (MMALPort, MMALPythonPort)): + return self.inputs[0].connect(source) + else: + for port in source.outputs: + if not port.connection: + return self.inputs[0].connect(port, **options) + raise PiCameraMMALError( + mmal.MMAL_EINVAL, 'no free output ports on %r' % source) + + def disconnect(self): + """ + Destroy the connection between this component's input port and the + upstream component. + """ + self.inputs[0].disconnect() + + @property + def connection(self): + """ + The :class:`MMALConnection` or :class:`MMALPythonConnection` object + linking this component to the upstream component. + """ + return self.inputs[0].connection + + @property + def name(self): + return self._name + + def _commit_port(self, port): + """ + Overridden to to copy the input port's configuration to the output + port(s), and to ensure that the output port(s)' format(s) match + the input port's format. 
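For example (a sketch; ``splitter`` stands in for any upstream component with a free output port)::

    processor = mo.MMALPythonComponent(name='py.processor')
    # Either name the upstream port explicitly...
    processor.connect(splitter.outputs[0])
    # ...or pass the component and let connect() pick the first free output:
    # processor.connect(splitter)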
+ """ + super(MMALPythonComponent, self)._commit_port(port) + if port.type == mmal.MMAL_PORT_TYPE_INPUT: + for output in self.outputs: + output.copy_from(port) + elif port.type == mmal.MMAL_PORT_TYPE_OUTPUT: + if port.format != self.inputs[0].format: + raise PiCameraMMALError(mmal.MMAL_EINVAL, 'output format mismatch') + + def enable(self): + super(MMALPythonComponent, self).enable() + if not self._thread: + self._thread = Thread(target=self._thread_run) + self._thread.daemon = True + self._thread.start() + + def disable(self): + super(MMALPythonComponent, self).disable() + if self._thread: + self._thread.join() + self._thread = None + if self._error: + raise self._error + + def _thread_run(self): + try: + while self._enabled: + buf = self._queue.get(timeout=0.1) + if buf: + try: + handler = { + 0: self._handle_frame, + mmal.MMAL_EVENT_PARAMETER_CHANGED: self._handle_parameter_changed, + mmal.MMAL_EVENT_FORMAT_CHANGED: self._handle_format_changed, + mmal.MMAL_EVENT_ERROR: self._handle_error, + mmal.MMAL_EVENT_EOS: self._handle_end_of_stream, + }[buf.command] + if handler(self.inputs[0], buf): + self._enabled = False + finally: + buf.release() + except Exception as e: + self._error = e + self._enabled = False + + def _handle_frame(self, port, buf): + """ + Handles frame data buffers (where :attr:`MMALBuffer.command` is set to + 0). + + Typically, if the component has output ports, the method is expected to + fetch a buffer from the output port(s), write data into them, and send + them back to their respective ports. + + Return values are as for normal event handlers (``True`` when no more + buffers are expected, ``False`` otherwise). + """ + return False + + def _handle_format_changed(self, port, buf): + """ + Handles format change events passed to the component (where + :attr:`MMALBuffer.command` is set to MMAL_EVENT_FORMAT_CHANGED). + + The default implementation re-configures the input port of the + component and emits the event on all output ports for downstream + processing. Override this method if you wish to do something else in + response to format change events. + + The *port* parameter is the port into which the event arrived, and + *buf* contains the event itself (a MMAL_EVENT_FORMAT_CHANGED_T + structure). Use ``mmal_event_format_changed_get`` on the buffer's data + to extract the event. + """ + with buf as data: + event = mmal.mmal_event_format_changed_get(buf._buf) + if port.connection: + # Handle format change on the source output port, if any. We + # don't check the output port capabilities because it was the + # port that emitted the format change in the first case so it'd + # be odd if it didn't support them (or the format requested)! 
+ output = port.connection._source + output.disable() + if isinstance(output, MMALPythonPort): + mmal.mmal_format_copy(output._format, event[0].format) + else: + mmal.mmal_format_copy(output._port[0].format, event[0].format) + output.commit() + output.buffer_count = ( + event[0].buffer_num_recommended + if event[0].buffer_num_recommended > 0 else + event[0].buffer_num_min) + output.buffer_size = ( + event[0].buffer_size_recommended + if event[0].buffer_size_recommended > 0 else + event[0].buffer_size_min) + if isinstance(output, MMALPythonPort): + output.enable() + else: + output.enable(port.connection._transfer) + # Now deal with the format change on this input port (this is only + # called from _thread_run so port must be an input port) + try: + if not (port.capabilities & mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE): + raise PiCameraMMALError( + mmal.MMAL_EINVAL, + 'port %s does not support event change' % self.name) + mmal.mmal_format_copy(port._format, event[0].format) + self._commit_port(port) + port.pool.resize( + event[0].buffer_num_recommended + if event[0].buffer_num_recommended > 0 else + event[0].buffer_num_min, + event[0].buffer_size_recommended + if event[0].buffer_size_recommended > 0 else + event[0].buffer_size_min) + port.buffer_count = len(port.pool) + port.buffer_size = port.pool[0].size + except: + # If this port can't handle the format change, or if anything goes + # wrong (like the owning component doesn't like the new format) + # stop the pipeline (from here at least) + if port.connection: + port.connection.disable() + raise + # Chain the format-change onward so everything downstream sees it. + # NOTE: the callback isn't given the format-change because there's no + # image data in it + for output in self.outputs: + out_buf = output.get_buffer() + out_buf.copy_from(buf) + output.send_buffer(out_buf) + return False + + def _handle_parameter_changed(self, port, buf): + """ + Handles parameter change events passed to the component (where + :attr:`MMALBuffer.command` is set to MMAL_EVENT_PARAMETER_CHANGED). + + The default implementation does nothing but return ``False`` + (indicating that processing should continue). Override this in + descendents to respond to parameter changes. + + The *port* parameter is the port into which the event arrived, and + *buf* contains the event itself (a MMAL_EVENT_PARAMETER_CHANGED_T + structure). + """ + return False + + def _handle_error(self, port, buf): + """ + Handles error notifications passed to the component (where + :attr:`MMALBuffer.command` is set to MMAL_EVENT_ERROR). + + The default implementation does nothing but return ``True`` (indicating + that processing should halt). Override this in descendents to respond + to error events. + + The *port* parameter is the port into which the event arrived. + """ + return True + + def _handle_end_of_stream(self, port, buf): + """ + Handles end-of-stream notifications passed to the component (where + :attr:`MMALBuffer.command` is set to MMAL_EVENT_EOS). + + The default implementation does nothing but return ``True`` (indicating + that processing should halt). Override this in descendents to respond + to the end of stream. + + The *port* parameter is the port into which the event arrived. + """ + return True + + +class MMALPythonTarget(MMALPythonComponent): + """ + Provides a simple component that writes all received buffers to the + specified *output* until a frame with the *done* flag is seen (defaults to + MMAL_BUFFER_HEADER_FLAG_EOS indicating End Of Stream). 
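Putting the handler methods above together, a descendent usually only declares the formats it accepts and overrides :meth:`_handle_frame`. A minimal pass-through sketch (illustrative, not part of picamera)::

    from picamera import mmal, mmalobj as mo

    class FrameCounter(mo.MMALPythonComponent):
        # Pass-through component that counts complete frames (sketch only)

        def __init__(self):
            super(FrameCounter, self).__init__(name='py.counter', outputs=1)
            self.inputs[0].supported_formats = {mmal.MMAL_ENCODING_I420}
            self.frame_count = 0

        def _handle_frame(self, port, buf):
            if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END:
                self.frame_count += 1
            # Forward the data; this assumes the output port is connected
            # downstream (get_buffer() raises otherwise)
            out = self.outputs[0].get_buffer()
            out.copy_from(buf)
            self.outputs[0].send_buffer(out)
            return False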
+ + The component provides all picamera's usual IO-handling characteristics; if + *output* is a string, a file with that name will be opened as the output + and closed implicitly when the component is closed. Otherwise, the output + will not be closed implicitly (the component did not open it, so the + assumption is that closing *output* is the caller's responsibility). If + *output* is an object with a ``write`` method it is assumed to be a + file-like object and is used as is. Otherwise, *output* is assumed to be a + writeable object supporting the buffer protocol (which is wrapped in a + :class:`BufferIO` stream). + """ + __slots__ = ('_opened', '_stream', '_done', '_event') + + def __init__(self, output, done=mmal.MMAL_BUFFER_HEADER_FLAG_EOS): + super(MMALPythonTarget, self).__init__(name='py.target', outputs=0) + self._stream, self._opened = open_stream(output) + self._done = done + self._event = Event() + # Accept all the formats picamera generally produces (user can add + # other esoteric stuff if they need to) + self.inputs[0].supported_formats = { + mmal.MMAL_ENCODING_MJPEG, + mmal.MMAL_ENCODING_H264, + mmal.MMAL_ENCODING_JPEG, + mmal.MMAL_ENCODING_GIF, + mmal.MMAL_ENCODING_PNG, + mmal.MMAL_ENCODING_BMP, + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_BGR24, + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + } + + def close(self): + super(MMALPythonTarget, self).close() + close_stream(self._stream, self._opened) + + def enable(self): + self._event.clear() + super(MMALPythonTarget, self).enable() + + def wait(self, timeout=None): + """ + Wait for the output to be "complete" as defined by the constructor's + *done* parameter. If *timeout* is specified it is the number of seconds + to wait for completion. The method returns ``True`` if the target + completed within the specified timeout and ``False`` otherwise. + """ + return self._event.wait(timeout) + + def _handle_frame(self, port, buf): + self._stream.write(buf.data) + if buf.flags & self._done: + self._event.set() + return True + return False + + +class MMALPythonConnection(MMALBaseConnection): + """ + Represents a connection between an :class:`MMALPythonBaseComponent` and a + :class:`MMALBaseComponent` or another :class:`MMALPythonBaseComponent`. + The constructor accepts arguments providing the *source* :class:`MMALPort` + (or :class:`MMALPythonPort`) and *target* :class:`MMALPort` (or + :class:`MMALPythonPort`). + + The *formats* parameter specifies an iterable of formats (in preference + order) that the connection may attempt when negotiating formats between + the two ports. If this is ``None``, or an empty iterable, no negotiation + will take place and the source port's format will simply be copied to the + target port. Otherwise, the iterable will be worked through in order until + a format acceptable to both ports is discovered. + + The *callback* parameter can optionally specify a callable which will be + executed for each buffer that traverses the connection (providing an + opportunity to manipulate or drop that buffer). If specified, it must be a + callable which accepts two parameters: the :class:`MMALPythonConnection` + object sending the data, and the :class:`MMALBuffer` object containing + data. The callable may optionally manipulate the :class:`MMALBuffer` and + return it to permit it to continue traversing the connection, or return + ``None`` in which case the buffer will be released. + + .. 
data:: default_formats + :annotation: = (MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA) + + Class attribute defining the default formats used to negotiate + connections between Python and and MMAL components, in preference + order. Note that OPAQUE is not present in contrast with the default + formats in :class:`MMALConnection`. + """ + __slots__ = ('_enabled', '_callback') + + default_formats = ( + mmal.MMAL_ENCODING_I420, + mmal.MMAL_ENCODING_RGB24, + mmal.MMAL_ENCODING_BGR24, + mmal.MMAL_ENCODING_RGBA, + mmal.MMAL_ENCODING_BGRA, + ) + + def __init__( + self, source, target, formats=default_formats, callback=None): + if not ( + isinstance(source, MMALPythonPort) or + isinstance(target, MMALPythonPort) + ): + raise PiCameraValueError('use a real MMAL connection') + super(MMALPythonConnection, self).__init__(source, target, formats) + self._enabled = False + self._callback = callback + + def close(self): + self.disable() + super(MMALPythonConnection, self).close() + + @property + def enabled(self): + """ + Returns ``True`` if the connection is enabled. Use :meth:`enable` + and :meth:`disable` to control the state of the connection. + """ + return self._enabled + + def enable(self): + """ + Enable the connection. When a connection is enabled, data is + continually transferred from the output port of the source to the input + port of the target component. + """ + if not self._enabled: + self._enabled = True + if isinstance(self._target, MMALPythonPort): + # Connected python input ports require no callback + self._target.enable() + else: + # Connected MMAL input ports don't know they're connected so + # provide a dummy callback + self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True + self._target.enable(lambda port, buf: True) + if isinstance(self._source, MMALPythonPort): + # Connected python output ports are nothing more than thin + # proxies for the target input port; no callback required + self._source.enable() + else: + # Connected MMAL output ports are made to transfer their + # data to the Python input port + self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True + self._source.enable(self._transfer) + + def disable(self): + """ + Disables the connection. + """ + self._enabled = False + self._source.disable() + self._target.disable() + + def _transfer(self, port, buf): + while self._enabled: + try: + dest = self._target.get_buffer(timeout=0.01) + except PiCameraPortDisabled: + dest = None + if dest: + dest.copy_from(buf) + try: + self._target.send_buffer(dest) + except PiCameraPortDisabled: + pass + return False + + @property + def name(self): + return '%s/%s' % (self._source.name, self._target.name) + + def __repr__(self): + try: + return '' % self.name + except NameError: + return '' + diff --git a/picamera/renderers.py b/picamera/renderers.py new file mode 100644 index 0000000..d58ca30 --- /dev/null +++ b/picamera/renderers.py @@ -0,0 +1,605 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + +import ctypes as ct + +from . import mmal, mmalobj as mo +from .exc import ( + PiCameraRuntimeError, + PiCameraValueError, + mmal_check, + ) + + +class PiRenderer(object): + """ + Wraps :class:`~mmalobj.MMALRenderer` for use by PiCamera. + + The *parent* parameter specifies the :class:`PiCamera` instance that has + constructed this renderer. All other parameters set the initial values + of the correspondingly named attributes (e.g. the *layer* parameter + sets the initial value of the :attr:`layer` attribute, the *crop* parameter + sets the initial value of the :attr:`crop` attribute, etc). + + This base class isn't directly used by :class:`PiCamera`, but the two + derivatives defined below, :class:`PiOverlayRenderer` and + :class:`PiPreviewRenderer`, are used to produce overlays and the camera + preview respectively. + + .. versionchanged:: 1.14 + Added *anamorphic* parameter + """ + + def __init__( + self, parent, layer=0, alpha=255, fullscreen=True, window=None, + crop=None, rotation=0, vflip=False, hflip=False, anamorphic=False): + # Create and enable the renderer component + self._rotation = 0 + self._vflip = False + self._hflip = False + self.renderer = mo.MMALRenderer() + try: + self.layer = layer + self.alpha = alpha + self.fullscreen = fullscreen + self.anamorphic = anamorphic + if window is not None: + self.window = window + if crop is not None: + self.crop = crop + self.rotation = rotation + self.vflip = vflip + self.hflip = hflip + self.renderer.enable() + except: + self.renderer.close() + raise + + def close(self): + """ + Finalizes the renderer and deallocates all structures. + + This method is called by the camera prior to destroying the renderer + (or more precisely, letting it go out of scope to permit the garbage + collector to destroy it at some future time). 
+ """ + if self.renderer: + self.renderer.close() + self.renderer = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + def _get_alpha(self): + return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].alpha + def _set_alpha(self, value): + try: + if not (0 <= value <= 255): + raise PiCameraValueError( + "Invalid alpha value: %d (valid range 0..255)" % value) + except TypeError: + raise PiCameraValueError("Invalid alpha value: %s" % value) + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + mp.set = mmal.MMAL_DISPLAY_SET_ALPHA + mp.alpha = value + self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp + alpha = property(_get_alpha, _set_alpha, doc="""\ + Retrieves or sets the opacity of the renderer. + + When queried, the :attr:`alpha` property returns a value between 0 and + 255 indicating the opacity of the renderer, where 0 is completely + transparent and 255 is completely opaque. The default value is 255. The + property can be set while recordings or previews are in progress. + + .. note:: + + If the renderer is being fed RGBA data (as in partially transparent + overlays), the alpha property will be ignored. + """) + + def _get_layer(self): + return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].layer + def _set_layer(self, value): + try: + if not (0 <= value <= 255): + raise PiCameraValueError( + "Invalid layer value: %d (valid range 0..255)" % value) + except TypeError: + raise PiCameraValueError("Invalid layer value: %s" % value) + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + mp.set = mmal.MMAL_DISPLAY_SET_LAYER + mp.layer = value + self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp + layer = property(_get_layer, _set_layer, doc="""\ + Retrieves or sets the layer of the renderer. + + The :attr:`layer` property is an integer which controls the layer that + the renderer occupies. Higher valued layers obscure lower valued layers + (with 0 being the "bottom" layer). The default value is 2. The property + can be set while recordings or previews are in progress. + """) + + def _get_fullscreen(self): + return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].fullscreen.value != mmal.MMAL_FALSE + def _set_fullscreen(self, value): + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + mp.set = mmal.MMAL_DISPLAY_SET_FULLSCREEN + mp.fullscreen = bool(value) + self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp + fullscreen = property(_get_fullscreen, _set_fullscreen, doc="""\ + Retrieves or sets whether the renderer appears full-screen. + + The :attr:`fullscreen` property is a bool which controls whether the + renderer takes up the entire display or not. When set to ``False``, the + :attr:`window` property can be used to control the precise size of the + renderer display. The property can be set while recordings or previews + are active. + """) + + def _get_anamorphic(self): + return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].noaspect.value != mmal.MMAL_FALSE + def _set_anamorphic(self, value): + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + mp.set = mmal.MMAL_DISPLAY_SET_NOASPECT + mp.noaspect = bool(value) + self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp + anamorphic = property(_get_anamorphic, _set_anamorphic, doc="""\ + Retrieves or sets whether the renderer is `anamorphic`_. 
+ + The :attr:`anamorphic` property is a bool which controls whether the + renderer respects the `aspect ratio`_ of the source. When ``False`` + (the default) the source aspect ratio is respected. When set to + ``True``, the aspect ratio of the source is anamorphed. This can help + with things like 16:9 widescreen composite outputs for previews without + having to change the cameras output ratio. The property can be set + while recordings or previews are active. + + .. versionadded:: 1.14 + + .. _aspect ratio: https://en.wikipedia.org/wiki/Aspect_ratio_(image) + .. _anamorphic: https://en.wikipedia.org/wiki/Anamorphic_widescreen + """) + + def _get_window(self): + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + return ( + mp.dest_rect.x, + mp.dest_rect.y, + mp.dest_rect.width, + mp.dest_rect.height, + ) + def _set_window(self, value): + try: + x, y, w, h = value + except (TypeError, ValueError) as e: + raise PiCameraValueError( + "Invalid window rectangle (x, y, w, h) tuple: %s" % value) + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + mp.set = mmal.MMAL_DISPLAY_SET_DEST_RECT + mp.dest_rect = mmal.MMAL_RECT_T(x, y, w, h) + self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp + window = property(_get_window, _set_window, doc="""\ + Retrieves or sets the size of the renderer. + + When the :attr:`fullscreen` property is set to ``False``, the + :attr:`window` property specifies the size and position of the renderer + on the display. The property is a 4-tuple consisting of ``(x, y, width, + height)``. The property can be set while recordings or previews are + active. + """) + + def _get_crop(self): + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + return ( + mp.src_rect.x, + mp.src_rect.y, + mp.src_rect.width, + mp.src_rect.height, + ) + def _set_crop(self, value): + try: + x, y, w, h = value + except (TypeError, ValueError) as e: + raise PiCameraValueError( + "Invalid crop rectangle (x, y, w, h) tuple: %s" % value) + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + mp.set = mmal.MMAL_DISPLAY_SET_SRC_RECT + mp.src_rect = mmal.MMAL_RECT_T(x, y, w, h) + self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp + crop = property(_get_crop, _set_crop, doc="""\ + Retrieves or sets the area to read from the source. + + The :attr:`crop` property specifies the rectangular area that the + renderer will read from the source as a 4-tuple of ``(x, y, width, + height)``. The special value ``(0, 0, 0, 0)`` (which is also the + default) means to read entire area of the source. The property can be + set while recordings or previews are active. + + For example, if the camera's resolution is currently configured as + 1280x720, setting this attribute to ``(160, 160, 640, 400)`` will + crop the preview to the center 640x400 pixels of the input. Note that + this property does not affect the size of the output rectangle, + which is controlled with :attr:`fullscreen` and :attr:`window`. + + .. note:: + + This property only affects the renderer; it has no bearing on image + captures or recordings (unlike the :attr:`~PiCamera.zoom` property + of the :class:`PiCamera` class). 
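In day-to-day use these attributes are reached through :attr:`PiCamera.preview` once :meth:`~PiCamera.start_preview` has been called; the extra keyword arguments to ``start_preview`` map onto the same properties. A short sketch (requires a Pi with the camera enabled)::

    import picamera
    from time import sleep

    with picamera.PiCamera() as camera:
        camera.start_preview(
            fullscreen=False, window=(0, 0, 640, 360), alpha=200)
        # Read only a 320x240 region of the source into the preview
        camera.preview.crop = (160, 120, 320, 240)
        camera.preview.layer = 3
        sleep(10)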
+ """) + + def _get_rotation(self): + return self._rotation + def _set_rotation(self, value): + try: + value = ((int(value) % 360) // 90) * 90 + except ValueError: + raise PiCameraValueError("Invalid rotation angle: %s" % value) + self._set_transform( + self._get_transform(value, self._vflip, self._hflip)) + self._rotation = value + rotation = property(_get_rotation, _set_rotation, doc="""\ + Retrieves or sets the current rotation of the renderer. + + When queried, the :attr:`rotation` property returns the rotation + applied to the renderer. Valid values are 0, 90, 180, and 270. + + When set, the property changes the rotation applied to the renderer's + output. The property can be set while recordings or previews are + active. The default is 0. + + .. note:: + + This property only affects the renderer; it has no bearing on image + captures or recordings (unlike the :attr:`~PiCamera.rotation` + property of the :class:`PiCamera` class). + """) + + def _get_vflip(self): + return self._vflip + def _set_vflip(self, value): + value = bool(value) + self._set_transform( + self._get_transform(self._rotation, value, self._hflip)) + self._vflip = value + vflip = property(_get_vflip, _set_vflip, doc="""\ + Retrieves or sets whether the renderer's output is vertically flipped. + + When queried, the :attr:`vflip` property returns a boolean indicating + whether or not the renderer's output is vertically flipped. The + property can be set while recordings or previews are in progress. The + default is ``False``. + + .. note:: + + This property only affects the renderer; it has no bearing on image + captures or recordings (unlike the :attr:`~PiCamera.vflip` property + of the :class:`PiCamera` class). + """) + + def _get_hflip(self): + return self._hflip + def _set_hflip(self, value): + value = bool(value) + self._set_transform( + self._get_transform(self._rotation, self._vflip, value)) + self._hflip = value + hflip = property(_get_hflip, _set_hflip, doc="""\ + Retrieves or sets whether the renderer's output is horizontally + flipped. + + When queried, the :attr:`vflip` property returns a boolean indicating + whether or not the renderer's output is horizontally flipped. The + property can be set while recordings or previews are in progress. The + default is ``False``. + + .. note:: + + This property only affects the renderer; it has no bearing on image + captures or recordings (unlike the :attr:`~PiCamera.hflip` property + of the :class:`PiCamera` class). + """) + + def _get_transform(self, rotate, vflip, hflip): + # Use a (horizontally) mirrored transform if one of vflip or hflip is + # set. If vflip is set, rotate by an extra 180 degrees to make up for + # the lack of a "true" vertical flip + mirror = vflip ^ hflip + if vflip: + rotate = (rotate + 180) % 360 + return { + (0, False): mmal.MMAL_DISPLAY_ROT0, + (90, False): mmal.MMAL_DISPLAY_ROT90, + (180, False): mmal.MMAL_DISPLAY_ROT180, + (270, False): mmal.MMAL_DISPLAY_ROT270, + (0, True): mmal.MMAL_DISPLAY_MIRROR_ROT0, + (90, True): mmal.MMAL_DISPLAY_MIRROR_ROT90, + (180, True): mmal.MMAL_DISPLAY_MIRROR_ROT180, + (270, True): mmal.MMAL_DISPLAY_MIRROR_ROT270, + }[(rotate, mirror)] + + def _set_transform(self, value): + mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] + mp.set = mmal.MMAL_DISPLAY_SET_TRANSFORM + mp.transform = value + self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp + + +class PiOverlayRenderer(PiRenderer): + """ + Represents an :class:`~mmalobj.MMALRenderer` with a static source for + overlays. 
+ + This class descends from :class:`PiRenderer` and adds a static *source* for + the :class:`~mmalobj.MMALRenderer`. The *source* must be an object that + supports the :ref:`buffer protocol ` in one of the supported + formats. + + The optional *resolution* parameter specifies the size of the *source* as a + ``(width, height)`` tuple. If this is omitted or ``None`` then the + resolution is assumed to be the same as the parent camera's current + :attr:`~PiCamera.resolution`. The optional *format* parameter specifies the + encoding of the *source*. This can be one of the unencoded formats: + ``'yuv'``, ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. If omitted or + ``None``, *format* will be guessed based on the size of *source* (assuming + 3 bytes for `RGB`_, and 4 bytes for `RGBA`_). + + The length of *source* must take into account that widths are rounded up to + the nearest multiple of 32, and heights to the nearest multiple of 16. For + example, if *resolution* is ``(1280, 720)``, and *format* is ``'rgb'`` then + *source* must be a buffer with length 1280 x 720 x 3 bytes, or 2,764,800 + bytes (because 1280 is a multiple of 32, and 720 is a multiple of 16 no + extra rounding is required). However, if *resolution* is ``(97, 57)``, and + *format* is ``'rgb'`` then *source* must be a buffer with length 128 x 64 x + 3 bytes, or 24,576 bytes (pixels beyond column 97 and row 57 in the source + will be ignored). + + The *layer*, *alpha*, *fullscreen*, and *window* parameters are the same + as in :class:`PiRenderer`. + + .. _RGB: https://en.wikipedia.org/wiki/RGB + .. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space + + .. versionchanged:: 1.13 + Added *format* parameter + + .. versionchanged:: 1.14 + Added *anamorphic* parameter + """ + + SOURCE_BPP = { + 3: 'rgb', + 4: 'rgba', + } + + SOURCE_ENCODINGS = { + 'yuv': mmal.MMAL_ENCODING_I420, + 'rgb': mmal.MMAL_ENCODING_RGB24, + 'rgba': mmal.MMAL_ENCODING_RGBA, + 'bgr': mmal.MMAL_ENCODING_BGR24, + 'bgra': mmal.MMAL_ENCODING_BGRA, + } + + def __init__( + self, parent, source, resolution=None, format=None, layer=0, + alpha=255, fullscreen=True, window=None, crop=None, rotation=0, + vflip=False, hflip=False, anamorphic=False): + super(PiOverlayRenderer, self).__init__( + parent, layer, alpha, fullscreen, window, crop, + rotation, vflip, hflip, anamorphic) + + # Copy format from camera's preview port, then adjust the encoding to + # RGB888 or RGBA and optionally adjust the resolution and size + if resolution is not None: + self.renderer.inputs[0].framesize = resolution + else: + self.renderer.inputs[0].framesize = parent.resolution + self.renderer.inputs[0].framerate = 0 + if format is None: + source_len = mo.buffer_bytes(source) + plane_size = self.renderer.inputs[0].framesize.pad() + plane_len = plane_size.width * plane_size.height + try: + format = self.SOURCE_BPP[source_len // plane_len] + except KeyError: + raise PiCameraValueError( + 'unable to determine format from source size') + try: + self.renderer.inputs[0].format = self.SOURCE_ENCODINGS[format] + except KeyError: + raise PiCameraValueError('unknown format %s' % format) + self.renderer.inputs[0].commit() + # The following callback is required to prevent the mmalobj layer + # automatically passing buffers back to the port + self.renderer.inputs[0].enable(callback=lambda port, buf: True) + self.update(source) + + def update(self, source): + """ + Update the overlay with a new source of data. 
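The padding rules translate into a little arithmetic when allocating the *source* buffer. A sketch using the public :meth:`PiCamera.add_overlay` wrapper (which constructs a :class:`PiOverlayRenderer` internally); the ``pad`` helper is purely illustrative::

    import picamera

    def pad(value, to):
        # Round *value* up to the nearest multiple of *to*
        return (value + to - 1) // to * to

    with picamera.PiCamera() as camera:
        camera.start_preview()
        width, height = 97, 57
        # Widths pad to multiples of 32, heights to multiples of 16
        buf = bytearray(pad(width, 32) * pad(height, 16) * 3)  # 128 * 64 * 3
        overlay = camera.add_overlay(
            bytes(buf), size=(width, height), format='rgb',
            layer=3, alpha=128)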
+ + The new *source* buffer must have the same size as the original buffer + used to create the overlay. There is currently no method for changing + the size of an existing overlay (remove and recreate the overlay if you + require this). + + .. note:: + + If you repeatedly update an overlay renderer, you must make sure + that you do so at a rate equal to, or slower than, the camera's + framerate. Going faster will rapidly starve the renderer's pool of + buffers leading to a runtime error. + """ + buf = self.renderer.inputs[0].get_buffer() + buf.data = source + self.renderer.inputs[0].send_buffer(buf) + + +class PiPreviewRenderer(PiRenderer): + """ + Represents an :class:`~mmalobj.MMALRenderer` which uses the camera's + preview as a source. + + This class descends from :class:`PiRenderer` and adds an + :class:`~mmalobj.MMALConnection` to connect the renderer to an MMAL port. + The *source* parameter specifies the :class:`~mmalobj.MMALPort` to connect + to the renderer. The *resolution* parameter can be used to override the + framesize of the *source*. See :attr:`resolution` for details of when this + is useful. + + All other parameters are the same as in :class:`PiRenderer`. + + .. versionchanged:: 1.14 + Added *anamorphic* parameter + """ + + def __init__( + self, parent, source, resolution=None, layer=2, alpha=255, + fullscreen=True, window=None, crop=None, rotation=0, vflip=False, + hflip=False, anamorphic=False): + super(PiPreviewRenderer, self).__init__( + parent, layer, alpha, fullscreen, window, crop, + rotation, vflip, hflip, anamorphic) + self._parent = parent + if resolution is not None: + resolution = mo.to_resolution(resolution) + source.framesize = resolution + self.renderer.inputs[0].connect(source).enable() + + def _get_resolution(self): + result = self._parent._camera.outputs[self._parent.CAMERA_PREVIEW_PORT].framesize + if result != self._parent.resolution: + return result + else: + return None + def _set_resolution(self, value): + if value is not None: + value = mo.to_resolution(value) + if ( + value.width > self._parent.resolution.width or + value.height > self._parent.resolution.height + ): + raise PiCameraValueError( + 'preview resolution cannot exceed camera resolution') + self.renderer.connection.disable() + if value is None: + value = self._parent.resolution + self._parent._camera.outputs[self._parent.CAMERA_PREVIEW_PORT].framesize = value + self._parent._camera.outputs[self._parent.CAMERA_PREVIEW_PORT].commit() + self.renderer.connection.enable() + resolution = property(_get_resolution, _set_resolution, doc="""\ + Retrieves or sets the resolution of the preview renderer. + + By default, the preview's resolution matches the camera's resolution. + However, particularly high resolutions (such as the maximum resolution + of the V2 camera module) can cause issues. In this case, you may wish + to set a lower resolution for the preview that the camera's resolution. + + When queried, the :attr:`resolution` property returns ``None`` if the + preview's resolution is derived from the camera's. In this case, + changing the camera's resolution will also cause the preview's + resolution to change. Otherwise, it returns the current preview + resolution as a tuple. + + .. note:: + + The preview resolution cannot be greater than the camera's + resolution. If you set a preview resolution, then change the + camera's resolution below the preview's resolution, this property + will silently revert to ``None``, meaning the preview's resolution + will follow the camera's resolution. 
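For instance (a sketch with V2-module numbers), the sensor can run at full still resolution while the preview renderer works at a quarter of that size::

    import picamera

    with picamera.PiCamera(resolution=(3280, 2464)) as camera:
        # The preview renders at 820x616 while captures stay full-size
        camera.start_preview(resolution=(820, 616))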
+ + When set, the property reconfigures the preview renderer with the new + resolution. As a special case, setting the property to ``None`` will + cause the preview to follow the camera's resolution once more. The + property can be set while recordings are in progress. The default is + ``None``. + + .. note:: + + This property only affects the renderer; it has no bearing on image + captures or recordings (unlike the :attr:`~PiCamera.resolution` + property of the :class:`PiCamera` class). + + .. versionadded:: 1.11 + """) + + +class PiNullSink(object): + """ + Implements an :class:`~mmalobj.MMALNullSink` which can be used in place of + a renderer. + + The *parent* parameter specifies the :class:`PiCamera` instance which + constructed this :class:`~mmalobj.MMALNullSink`. The *source* parameter + specifies the :class:`~mmalobj.MMALPort` which the null-sink should connect + to its input. + + The null-sink can act as a drop-in replacement for :class:`PiRenderer` in + most cases, but obviously doesn't implement attributes like ``alpha``, + ``layer``, etc. as it simply dumps any incoming frames. This is also the + reason that this class doesn't derive from :class:`PiRenderer` like all + other classes in this module. + """ + + def __init__(self, parent, source): + self.renderer = mo.MMALNullSink() + self.renderer.enable() + self.renderer.inputs[0].connect(source).enable() + + def close(self): + """ + Finalizes the null-sink and deallocates all structures. + + This method is called by the camera prior to destroying the null-sink + (or more precisely, letting it go out of scope to permit the garbage + collector to destroy it at some future time). + """ + if self.renderer: + self.renderer.close() + self.renderer = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() diff --git a/picamera/streams.py b/picamera/streams.py new file mode 100644 index 0000000..7c26c84 --- /dev/null +++ b/picamera/streams.py @@ -0,0 +1,833 @@ +# vim: set et sw=4 sts=4 fileencoding=utf-8: +# +# Python camera library for the Rasperry-Pi camera module +# Copyright (c) 2013-2017 Dave Jones +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holder nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + unicode_literals, + print_function, + division, + absolute_import, + ) + +# Make Py2's str equivalent to Py3's +str = type('') + + +import io +from threading import RLock +from collections import deque +from operator import attrgetter +from weakref import ref + +from picamera.exc import PiCameraValueError +from picamera.frames import PiVideoFrame, PiVideoFrameType + + +class BufferIO(io.IOBase): + """ + A stream which uses a :class:`memoryview` for storage. + + This is used internally by picamera for capturing directly to an existing + object which supports the buffer protocol (like a numpy array). Because the + underlying storage is fixed in size, the stream also has a fixed size and + will raise an :exc:`IOError` exception if an attempt is made to write + beyond the end of the buffer (though seek beyond the end is supported). + + Users should never need this class directly. + """ + __slots__ = ('_buf', '_pos', '_size') + + def __init__(self, obj): + self._buf = memoryview(obj) + if self._buf.ndim > 1 or self._buf.format != 'B': + try: + # Py2.7 doesn't have memoryview.cast + self._buf = self._buf.cast('B') + except AttributeError: + raise ValueError( + 'buffer object must be one-dimensional and have unsigned ' + 'byte format ("B")') + self._pos = 0 + self._size = self._buf.shape[0] + + def close(self): + super(BufferIO, self).close() + try: + self._buf.release() + except AttributeError: + # Py2.7 doesn't have memoryview.release + pass + + def _check_open(self): + if self.closed: + raise ValueError('I/O operation on a closed stream') + + @property + def size(self): + """ + Return the maximum size of the buffer in bytes. + """ + return self._size + + def readable(self): + """ + Returns ``True``, indicating that the stream supports :meth:`read`. + """ + self._check_open() + return True + + def writable(self): + """ + Returns ``True``, indicating that the stream supports :meth:`write`. + """ + self._check_open() + return not self._buf.readonly + + def seekable(self): + """ + Returns ``True``, indicating the stream supports :meth:`seek` and + :meth:`tell`. + """ + self._check_open() + return True + + def getvalue(self): + """ + Return ``bytes`` containing the entire contents of the buffer. + """ + with self.lock: + return self._buf.tobytes() + + def tell(self): + """ + Return the current buffer position. + """ + self._check_open() + return self._pos + + def seek(self, offset, whence=io.SEEK_SET): + """ + Change the buffer position to the given byte *offset*. *offset* is + interpreted relative to the position indicated by *whence*. Values for + *whence* are: + + * ``SEEK_SET`` or ``0`` – start of the buffer (the default); *offset* + should be zero or positive + + * ``SEEK_CUR`` or ``1`` – current buffer position; *offset* may be + negative + + * ``SEEK_END`` or ``2`` – end of the buffer; *offset* is usually + negative + + Return the new absolute position. 
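Although :class:`BufferIO` is an internal helper, its behaviour is easy to demonstrate in isolation (Python 3 shown)::

    >>> from picamera.streams import BufferIO
    >>> buf = bytearray(10)
    >>> stream = BufferIO(buf)
    >>> stream.write(b'hello')
    5
    >>> stream.write(b' world!')   # only 5 bytes of room remain
    5
    >>> bytes(buf)
    b'hello worl'
    >>> stream.seek(0)
    0
    >>> stream.read(5)
    b'hello'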
+ """ + self._check_open() + if whence == io.SEEK_CUR: + offset = self._pos + offset + elif whence == io.SEEK_END: + offset = self.size + offset + if offset < 0: + raise ValueError( + 'New position is before the start of the stream') + self._pos = offset + return self._pos + + def read(self, n=-1): + """ + Read up to *n* bytes from the buffer and return them. As a convenience, + if *n* is unspecified or -1, :meth:`readall` is called. Fewer than *n* + bytes may be returned if there are fewer than *n* bytes from the + current buffer position to the end of the buffer. + + If 0 bytes are returned, and *n* was not 0, this indicates end of the + buffer. + """ + self._check_open() + if n < 0: + return self.readall() + elif n == 0: + return b'' + else: + result = self._buf[self._pos:self._pos + n].tobytes() + self._pos += len(result) + return result + + def readinto(self, b): + """ + Read bytes into a pre-allocated, writable bytes-like object b, and + return the number of bytes read. + """ + self._check_open() + result = max(0, min(len(b), self._size - self._pos)) + if result == 0: + return 0 + else: + b[:result] = self._buf[self._pos:self._pos + result] + return result + + def readall(self): + """ + Read and return all bytes from the buffer until EOF. + """ + return self.read(max(0, self.size - self._pos)) + + def truncate(self, size=None): + """ + Raises :exc:`NotImplementedError` as the underlying buffer cannot be + resized. + """ + raise NotImplementedError('cannot resize a BufferIO stream') + + def write(self, b): + """ + Write the given bytes or bytearray object, *b*, to the underlying + buffer and return the number of bytes written. If the underlying + buffer isn't large enough to contain all the bytes of *b*, as many + bytes as possible will be written before raising :exc:`IOError`. + """ + self._check_open() + if self._buf.readonly: + raise IOError('buffer object is not writeable') + excess = max(0, len(b) - (self.size - self._pos)) + result = len(b) - excess + if excess: + self._buf[self._pos:self._pos + result] = b[:-excess] + else: + self._buf[self._pos:self._pos + result] = b + self._pos += result + return result + + +class CircularIO(io.IOBase): + """ + A thread-safe stream which uses a ring buffer for storage. + + CircularIO provides an in-memory stream similar to the :class:`io.BytesIO` + class. However, unlike :class:`io.BytesIO` its underlying storage is a + `ring buffer`_ with a fixed maximum size. Once the maximum size is reached, + writing effectively loops round to the beginning to the ring and starts + overwriting the oldest content. + + Actually, this ring buffer is slightly different to "traditional" ring + buffers. This ring buffer is optimized for camera usage which is expected + to be read-light, write-heavy, and with writes *mostly* aligned to frame + boundaries. Internally, the stream simply references each chunk written and + drops references each time the overall size of the stream would exceed the + specified limit. + + As a result the ring buffer doesn't stay strictly at its allocated limit as + traditional ring buffers do. It also drops entire writes when the limit is + reached (this is a desirable behaviour because it means that often whole + frames are dropped from the start of the stream, rather than leaving + partial frames at the start as in a traditional ring buffer). For example: + + .. 
code-block:: pycon + + >>> stream = CircularIO(size=10) + >>> stream.write(b'abc') + >>> stream.write(b'def') + >>> stream.getvalue() + b'abcdef' + >>> stream.write(b'ghijk') + >>> stream.getvalue() + b'defghijk' + + In a traditional ring buffer, one would expect the last ``getvalue()`` call + to return ``'bcdefghijk'`` as only the first character would be lost at the + limit of 10 bytes. However, this ring buffer has dropped the entire write + of ``'abc'``. + + The *size* parameter specifies the maximum size of the stream in bytes. The + :meth:`read`, :meth:`tell`, and :meth:`seek` methods all operate + equivalently to those in :class:`io.BytesIO` whilst :meth:`write` only + differs in the wrapping behaviour described above. A :meth:`read1` method + is also provided for efficient reading of the underlying ring buffer in + write-sized chunks (or less). + + A re-entrant threading lock guards all operations, and is accessible for + external use via the :attr:`lock` attribute. + + The performance of the class is geared toward faster writing than reading + on the assumption that writing will be the common operation and reading the + rare operation (a reasonable assumption for the camera use-case, but not + necessarily for more general usage). + + .. _ring buffer: https://en.wikipedia.org/wiki/Circular_buffer + """ + def __init__(self, size): + if size < 1: + raise ValueError('size must be a positive integer') + self._lock = RLock() + self._data = deque() + self._size = size + self._length = 0 + self._pos = 0 + self._pos_index = 0 + self._pos_offset = 0 + + def _check_open(self): + if self.closed: + raise ValueError('I/O operation on a closed stream') + + @property + def lock(self): + """ + A re-entrant threading lock which is used to guard all operations. + """ + return self._lock + + @property + def size(self): + """ + Return the maximum size of the buffer in bytes. + """ + return self._size + + def readable(self): + """ + Returns ``True``, indicating that the stream supports :meth:`read`. + """ + self._check_open() + return True + + def writable(self): + """ + Returns ``True``, indicating that the stream supports :meth:`write`. + """ + self._check_open() + return True + + def seekable(self): + """ + Returns ``True``, indicating the stream supports :meth:`seek` and + :meth:`tell`. + """ + self._check_open() + return True + + def getvalue(self): + """ + Return ``bytes`` containing the entire contents of the buffer. + """ + with self.lock: + return b''.join(self._data) + + def _set_pos(self, value): + self._pos = value + self._pos_index = -1 + self._pos_offset = chunk_pos = 0 + for self._pos_index, chunk in enumerate(self._data): + if chunk_pos + len(chunk) > value: + self._pos_offset = value - chunk_pos + return + else: + chunk_pos += len(chunk) + self._pos_index += 1 + self._pos_offset = value - chunk_pos + + def tell(self): + """ + Return the current stream position. + """ + self._check_open() + with self.lock: + return self._pos + + def seek(self, offset, whence=io.SEEK_SET): + """ + Change the stream position to the given byte *offset*. *offset* is + interpreted relative to the position indicated by *whence*. Values for + *whence* are: + + * ``SEEK_SET`` or ``0`` – start of the stream (the default); *offset* + should be zero or positive + + * ``SEEK_CUR`` or ``1`` – current stream position; *offset* may be + negative + + * ``SEEK_END`` or ``2`` – end of the stream; *offset* is usually + negative + + Return the new absolute position. 
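+
+        For example, to position the stream 1000 bytes before its current
+        end (a sketch; assumes at least 1000 bytes have been written and
+        that the standard :mod:`io` module is imported for the ``SEEK_END``
+        constant)::
+
+            stream.seek(-1000, io.SEEK_END)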
+ """ + self._check_open() + with self.lock: + if whence == io.SEEK_CUR: + offset = self._pos + offset + elif whence == io.SEEK_END: + offset = self._length + offset + if offset < 0: + raise ValueError( + 'New position is before the start of the stream') + self._set_pos(offset) + return self._pos + + def read(self, n=-1): + """ + Read up to *n* bytes from the stream and return them. As a convenience, + if *n* is unspecified or -1, :meth:`readall` is called. Fewer than *n* + bytes may be returned if there are fewer than *n* bytes from the + current stream position to the end of the stream. + + If 0 bytes are returned, and *n* was not 0, this indicates end of the + stream. + """ + self._check_open() + if n < 0: + return self.readall() + elif n == 0: + return b'' + else: + with self.lock: + if self._pos >= self._length: + return b'' + from_index, from_offset = self._pos_index, self._pos_offset + self._set_pos(self._pos + n) + result = self._data[from_index][from_offset:from_offset + n] + # Bah ... can't slice a deque + for i in range(from_index + 1, self._pos_index): + result += self._data[i] + if from_index < self._pos_index < len(self._data): + result += self._data[self._pos_index][:self._pos_offset] + return result + + def readall(self): + """ + Read and return all bytes from the stream until EOF, using multiple + calls to the stream if necessary. + """ + return self.read(max(0, self._length - self._pos)) + + def read1(self, n=-1): + """ + Read up to *n* bytes from the stream using only a single call to the + underlying object. + + In the case of :class:`CircularIO` this roughly corresponds to + returning the content from the current position up to the end of the + write that added that content to the stream (assuming no subsequent + writes overwrote the content). :meth:`read1` is particularly useful + for efficient copying of the stream's content. + """ + self._check_open() + with self.lock: + if self._pos == self._length: + return b'' + chunk = self._data[self._pos_index] + if n == -1: + n = len(chunk) - self._pos_offset + result = chunk[self._pos_offset:self._pos_offset + n] + self._pos += len(result) + self._pos_offset += n + if self._pos_offset >= len(chunk): + self._pos_index += 1 + self._pos_offset = 0 + return result + + def truncate(self, size=None): + """ + Resize the stream to the given *size* in bytes (or the current position + if *size* is not specified). This resizing can extend or reduce the + current stream size. In case of extension, the contents of the new file + area will be NUL (``\\x00``) bytes. The new stream size is returned. + + The current stream position isn’t changed unless the resizing is + expanding the stream, in which case it may be set to the maximum stream + size if the expansion causes the ring buffer to loop around. 
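+
+        For example (a sketch), the buffered content can be discarded while
+        keeping the stream open for further writes by rewinding and then
+        truncating at the current position::
+
+            stream.seek(0)
+            stream.truncate()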
+ """ + self._check_open() + with self.lock: + if size is None: + size = self._pos + if size < 0: + raise ValueError('size must be zero, or a positive integer') + if size > self._length: + # Backfill the space between stream end and current position + # with NUL bytes + fill = b'\x00' * (size - self._length) + self._set_pos(self._length) + self.write(fill) + elif size < self._length: + # Lop off chunks until we get to the last one at the truncation + # point, and slice that one + save_pos = self._pos + self._set_pos(size) + while self._pos_index < len(self._data) - 1: + self._data.pop() + if self._pos_offset > 0: + self._data[self._pos_index] = self._data[self._pos_index][:self._pos_offset] + self._pos_index += 1 + self._pos_offset = 0 + else: + self._data.pop() + self._length = size + if self._pos != save_pos: + self._set_pos(save_pos) + + def write(self, b): + """ + Write the given bytes or bytearray object, *b*, to the underlying + stream and return the number of bytes written. + """ + self._check_open() + b = bytes(b) + with self.lock: + # Special case: stream position is beyond the end of the stream. + # Call truncate to backfill space first + if self._pos > self._length: + self.truncate() + result = len(b) + if self._pos == self._length: + # Fast path: stream position is at the end of the stream so + # just append a new chunk + self._data.append(b) + self._length += len(b) + self._pos = self._length + self._pos_index = len(self._data) + self._pos_offset = 0 + else: + # Slow path: stream position is somewhere in the middle; + # overwrite bytes in the current (and if necessary, subsequent) + # chunk(s), without extending them. If we reach the end of the + # stream, call ourselves recursively to continue down the fast + # path + while b and (self._pos < self._length): + chunk = self._data[self._pos_index] + head = b[:len(chunk) - self._pos_offset] + assert head + b = b[len(head):] + self._data[self._pos_index] = b''.join(( + chunk[:self._pos_offset], + head, + chunk[self._pos_offset + len(head):] + )) + self._pos += len(head) + if self._pos_offset + len(head) >= len(chunk): + self._pos_index += 1 + self._pos_offset = 0 + else: + self._pos_offset += len(head) + if b: + self.write(b) + # If the stream is now beyond the specified size limit, remove + # whole chunks until the size is within the limit again + while self._length > self._size: + chunk = self._data.popleft() + self._length -= len(chunk) + self._pos -= len(chunk) + self._pos_index -= 1 + # no need to adjust self._pos_offset + return result + + +class PiCameraDequeHack(deque): + def __init__(self, stream): + super(PiCameraDequeHack, self).__init__() + self.stream = ref(stream) # avoid a circular ref + + def append(self, item): + # Include the frame's metadata. 
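+        # Each chunk appended to the underlying deque is stored as a
+        # (chunk, frame) tuple, where frame is the metadata of the most
+        # recently completed frame (or None if no frame has completed yet).
+        # The pop/popleft/__getitem__ overrides below strip the metadata off
+        # again, so the rest of CircularIO still sees plain bytes chunks.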
+ frame = self.stream()._get_frame() + return super(PiCameraDequeHack, self).append((item, frame)) + + def pop(self): + return super(PiCameraDequeHack, self).pop()[0] + + def popleft(self): + return super(PiCameraDequeHack, self).popleft()[0] + + def __getitem__(self, index): + return super(PiCameraDequeHack, self).__getitem__(index)[0] + + def __setitem__(self, index, value): + frame = super(PiCameraDequeHack, self).__getitem__(index)[1] + return super(PiCameraDequeHack, self).__setitem__(index, (value, frame)) + + def __iter__(self): + for item, frame in self.iter_both(False): + yield item + + def __reversed__(self): + for item, frame in self.iter_both(True): + yield item + + def iter_both(self, reverse): + if reverse: + return super(PiCameraDequeHack, self).__reversed__() + else: + return super(PiCameraDequeHack, self).__iter__() + + +class PiCameraDequeFrames(object): + def __init__(self, stream): + super(PiCameraDequeFrames, self).__init__() + self.stream = ref(stream) # avoid a circular ref + + def __iter__(self): + with self.stream().lock: + pos = 0 + for item, frame in self.stream()._data.iter_both(False): + pos += len(item) + if frame: + # Rewrite the video_size and split_size attributes + # according to the current position of the chunk + frame = PiVideoFrame( + index=frame.index, + frame_type=frame.frame_type, + frame_size=frame.frame_size, + video_size=pos, + split_size=pos, + timestamp=frame.timestamp, + complete=frame.complete, + ) + # Only yield the frame meta-data if the start of the frame + # still exists in the stream + if pos - frame.frame_size >= 0: + yield frame + + def __reversed__(self): + with self.stream().lock: + pos = self.stream()._length + for item, frame in self.stream()._data.iter_both(True): + if frame: + frame = PiVideoFrame( + index=frame.index, + frame_type=frame.frame_type, + frame_size=frame.frame_size, + video_size=pos, + split_size=pos, + timestamp=frame.timestamp, + complete=frame.complete, + ) + if pos - frame.frame_size >= 0: + yield frame + pos -= len(item) + + +class PiCameraCircularIO(CircularIO): + """ + A derivative of :class:`CircularIO` which tracks camera frames. + + PiCameraCircularIO provides an in-memory stream based on a ring buffer. It + is a specialization of :class:`CircularIO` which associates video frame + meta-data with the recorded stream, accessible from the :attr:`frames` + property. + + .. warning:: + + The class makes a couple of assumptions which will cause the frame + meta-data tracking to break if they are not adhered to: + + * the stream is only ever appended to - no writes ever start from + the middle of the stream + + * the stream is never truncated (from the right; being ring buffer + based, left truncation will occur automatically); the exception + to this is the :meth:`clear` method. + + The *camera* parameter specifies the :class:`PiCamera` instance that will + be recording video to the stream. If specified, the *size* parameter + determines the maximum size of the stream in bytes. If *size* is not + specified (or ``None``), then *seconds* must be specified instead. This + provides the maximum length of the stream in seconds, assuming a data rate + in bits-per-second given by the *bitrate* parameter (which defaults to + ``17000000``, or 17Mbps, which is also the default bitrate used for video + recording by :class:`PiCamera`). You cannot specify both *size* and + *seconds*. + + The *splitter_port* parameter specifies the port of the built-in splitter + that the video encoder will be attached to. 
This defaults to ``1`` and most + users will have no need to specify anything different. If you do specify + something else, ensure it is equal to the *splitter_port* parameter of the + corresponding call to :meth:`~PiCamera.start_recording`. For example:: + + import picamera + + with picamera.PiCamera() as camera: + with picamera.PiCameraCircularIO(camera, splitter_port=2) as stream: + camera.start_recording(stream, format='h264', splitter_port=2) + camera.wait_recording(10, splitter_port=2) + camera.stop_recording(splitter_port=2) + + .. attribute:: frames + + Returns an iterator over the frame meta-data. + + As the camera records video to the stream, the class captures the + meta-data associated with each frame (in the form of a + :class:`PiVideoFrame` tuple), discarding meta-data for frames which are + no longer fully stored within the underlying ring buffer. You can use + the frame meta-data to locate, for example, the first keyframe present + in the stream in order to determine an appropriate range to extract. + """ + def __init__( + self, camera, size=None, seconds=None, bitrate=17000000, + splitter_port=1): + if size is None and seconds is None: + raise PiCameraValueError('You must specify either size, or seconds') + if size is not None and seconds is not None: + raise PiCameraValueError('You cannot specify both size and seconds') + if seconds is not None: + size = bitrate * seconds // 8 + super(PiCameraCircularIO, self).__init__(size) + try: + camera._encoders + except AttributeError: + raise PiCameraValueError('camera must be a valid PiCamera object') + self.camera = camera + self.splitter_port = splitter_port + self._data = PiCameraDequeHack(self) + self._frames = PiCameraDequeFrames(self) + + def _get_frame(self): + """ + Return frame metadata from latest frame, when it is complete. + """ + encoder = self.camera._encoders[self.splitter_port] + return encoder.frame if encoder.frame.complete else None + + @property + def frames(self): + """ + An iterable which contains the meta-data (:class:`PiVideoFrame` + objects) for all complete frames currently stored in the circular + buffer. + """ + return self._frames + + def clear(self): + """ + Resets the stream to empty safely. + + This method truncates the stream to empty, and clears the associated + frame meta-data too, ensuring that subsequent writes operate correctly + (see the warning in the :class:`PiCameraCircularIO` class + documentation). + """ + with self.lock: + self.seek(0) + self.truncate() + + def _find(self, field, criteria, first_frame): + first = last = None + attr = attrgetter(field) + for frame in reversed(self._frames): + if last is None: + last = frame + if first_frame in (None, frame.frame_type): + first = frame + if last is not None and attr(last) - attr(frame) >= criteria: + break + if last is not None and attr(last) - attr(frame) >= criteria: + break + return first, last + + def _find_all(self, first_frame): + chunks = [] + first = last = None + for frame in reversed(self._frames): + last = frame + break + for frame in self._frames: + if first_frame in (None, frame.frame_type): + first = frame + break + return first, last + + def copy_to( + self, output, size=None, seconds=None, frames=None, + first_frame=PiVideoFrameType.sps_header): + """ + copy_to(output, size=None, seconds=None, frames=None, first_frame=PiVideoFrameType.sps_header) + + Copies content from the stream to *output*. 
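+
+        For example, assuming ``camera`` has been recording H.264 video to
+        this stream (a sketch; the output filename is arbitrary), roughly the
+        last ten seconds of footage can be saved with::
+
+            stream.copy_to('recent.h264', seconds=10)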
+
+        By default, this method copies all complete frames from the circular
+        stream to the filename or file-like object given by *output*.
+
+        If *size* is specified then the copy will be limited to the whole
+        number of frames that fit within the specified number of bytes. If
+        *seconds* is specified, then the copy will be limited to that number of
+        seconds worth of frames. If *frames* is specified then the copy will
+        be limited to that number of frames. Only one of *size*, *seconds*, or
+        *frames* can be specified. If none is specified, all frames are copied.
+
+        If *first_frame* is specified, it defines the frame type of the first
+        frame to be copied. By default this is
+        :attr:`~PiVideoFrameType.sps_header` as this must usually be the first
+        frame in an H264 stream. If *first_frame* is ``None``, no such limit
+        will be applied.
+
+        .. warning::
+
+            Note that if a frame of the specified type (e.g. SPS header) cannot
+            be found within the specified number of seconds, bytes, or frames,
+            then this method will simply copy nothing (but no error will be
+            raised).
+
+        The stream's position is not affected by this method.
+        """
+        if (size, seconds, frames).count(None) < 2:
+            raise PiCameraValueError(
+                'You can only specify one of size, seconds, or frames')
+        if isinstance(output, bytes):
+            output = output.decode('utf-8')
+        opened = isinstance(output, str)
+        if opened:
+            output = io.open(output, 'wb')
+        try:
+            with self.lock:
+                if size is not None:
+                    first, last = self._find('video_size', size, first_frame)
+                elif seconds is not None:
+                    seconds = int(seconds * 1000000)
+                    first, last = self._find('timestamp', seconds, first_frame)
+                elif frames is not None:
+                    first, last = self._find('index', frames, first_frame)
+                else:
+                    first, last = self._find_all(first_frame)
+                # Copy chunk references into a holding buffer; this allows us
+                # to release the lock on the stream quickly (in case recording
+                # is on-going)
+                chunks = []
+                if first is not None and last is not None:
+                    pos = 0
+                    for buf, frame in self._data.iter_both(False):
+                        if pos > last.position + last.frame_size:
+                            break
+                        elif pos >= first.position:
+                            chunks.append(buf)
+                        pos += len(buf)
+            # Perform the actual I/O, copying chunks to the output
+            for buf in chunks:
+                output.write(buf)
+            return first, last
+        finally:
+            if opened:
+                output.close()