Vendor picamera

Author: Dan Ponte, 2023-02-01 21:37:42 +00:00
parent 71e70571d9
commit cda05b1a91
13 changed files with 15820 additions and 0 deletions

picamera/__init__.py (new file, +114)

@@ -0,0 +1,114 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The picamera package consists of several modules which provide a pure Python
interface to the Raspberry Pi's camera module. The package is only intended to
run on a Raspberry Pi, and expects to be able to load the MMAL library
(libmmal.so) upon import.
The classes defined by most modules in this package are available directly from
the :mod:`picamera` namespace. In other words, the following code is typically
all that is required to access classes in the package::
import picamera
The :mod:`picamera.array` module is an exception to this as it depends on the
third-party `numpy`_ package (this avoids making numpy a mandatory dependency
for picamera).
.. _numpy: http://www.numpy.org/
The following sections document the various modules available within the
package:
* :mod:`picamera.camera`
* :mod:`picamera.encoders`
* :mod:`picamera.frames`
* :mod:`picamera.streams`
* :mod:`picamera.renderers`
* :mod:`picamera.color`
* :mod:`picamera.exc`
* :mod:`picamera.array`
"""
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
from picamera.exc import (
PiCameraWarning,
PiCameraDeprecated,
PiCameraFallback,
PiCameraAlphaStripping,
PiCameraResizerEncoding,
PiCameraError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraValueError,
PiCameraMMALError,
PiCameraPortDisabled,
mmal_check,
)
from picamera.mmalobj import PiResolution, PiFramerateRange, PiSensorMode
from picamera.camera import PiCamera
from picamera.display import PiDisplay
from picamera.frames import PiVideoFrame, PiVideoFrameType
from picamera.encoders import (
PiEncoder,
PiVideoEncoder,
PiImageEncoder,
PiRawMixin,
PiCookedVideoEncoder,
PiRawVideoEncoder,
PiOneImageEncoder,
PiMultiImageEncoder,
PiRawImageMixin,
PiCookedOneImageEncoder,
PiRawOneImageEncoder,
PiCookedMultiImageEncoder,
PiRawMultiImageEncoder,
)
from picamera.renderers import (
PiRenderer,
PiOverlayRenderer,
PiPreviewRenderer,
PiNullSink,
)
from picamera.streams import PiCameraCircularIO, CircularIO, BufferIO
from picamera.color import Color, Red, Green, Blue, Hue, Lightness, Saturation

picamera/array.py (new file, +908)

@@ -0,0 +1,908 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str and range equivalent to Py3's
native_str = str
str = type('')
try:
range = xrange
except NameError:
pass
import io
import ctypes as ct
import warnings
import numpy as np
from numpy.lib.stride_tricks import as_strided
from . import mmalobj as mo, mmal
from .exc import (
mmal_check,
PiCameraValueError,
PiCameraDeprecated,
PiCameraPortDisabled,
)
motion_dtype = np.dtype([
(native_str('x'), np.int8),
(native_str('y'), np.int8),
(native_str('sad'), np.uint16),
])
def raw_resolution(resolution, splitter=False):
"""
Round a (width, height) tuple up to the nearest multiple of 32 horizontally
and 16 vertically (as this is what the Pi's camera module does for
unencoded output).
"""
width, height = resolution
if splitter:
fwidth = (width + 15) & ~15
else:
fwidth = (width + 31) & ~31
fheight = (height + 15) & ~15
return fwidth, fheight
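# --- Editor's note (illustrative, not part of upstream picamera) ---
# A few worked examples of the rounding performed by raw_resolution():
#
#     >>> raw_resolution((1280, 720))     # already aligned
#     (1280, 720)
#     >>> raw_resolution((100, 100))      # width -> 32, height -> 16
#     (128, 112)
#     >>> raw_resolution((100, 100), splitter=True)
#     (112, 112)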
def bytes_to_yuv(data, resolution):
"""
Converts a bytes object containing YUV data to a `numpy`_ array.
"""
width, height = resolution
fwidth, fheight = raw_resolution(resolution)
y_len = fwidth * fheight
uv_len = (fwidth // 2) * (fheight // 2)
if len(data) != (y_len + 2 * uv_len):
raise PiCameraValueError(
'Incorrect buffer length for resolution %dx%d' % (width, height))
# Separate out the Y, U, and V values from the array
a = np.frombuffer(data, dtype=np.uint8)
Y = a[:y_len].reshape((fheight, fwidth))
Uq = a[y_len:-uv_len].reshape((fheight // 2, fwidth // 2))
Vq = a[-uv_len:].reshape((fheight // 2, fwidth // 2))
# Reshape the values into two dimensions, and double the size of the
# U and V values (which only have quarter resolution in YUV4:2:0)
U = np.empty_like(Y)
V = np.empty_like(Y)
U[0::2, 0::2] = Uq
U[0::2, 1::2] = Uq
U[1::2, 0::2] = Uq
U[1::2, 1::2] = Uq
V[0::2, 0::2] = Vq
V[0::2, 1::2] = Vq
V[1::2, 0::2] = Vq
V[1::2, 1::2] = Vq
# Stack the channels together and crop to the actual resolution
return np.dstack((Y, U, V))[:height, :width]
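# --- Editor's note (illustrative, not part of upstream picamera) ---
# For a 640x480 capture the YUV420 buffer handed to bytes_to_yuv() is
# laid out as 640 * 480 Y bytes followed by two quarter-resolution
# planes of 320 * 240 bytes each, i.e. 307200 + 2 * 76800 = 460800
# bytes, and the function returns an array of shape (480, 640, 3).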
def bytes_to_rgb(data, resolution):
"""
    Converts a bytes object containing RGB/BGR data to a `numpy`_ array.
"""
width, height = resolution
fwidth, fheight = raw_resolution(resolution)
# Workaround: output from the video splitter is rounded to 16x16 instead
# of 32x16 (but only for RGB, and only when a resizer is not used)
if len(data) != (fwidth * fheight * 3):
fwidth, fheight = raw_resolution(resolution, splitter=True)
if len(data) != (fwidth * fheight * 3):
raise PiCameraValueError(
'Incorrect buffer length for resolution %dx%d' % (width, height))
# Crop to the actual resolution
return np.frombuffer(data, dtype=np.uint8).\
reshape((fheight, fwidth, 3))[:height, :width, :]
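# --- Editor's note (illustrative, not part of upstream picamera) ---
# For a (100, 100) RGB capture, bytes_to_rgb() first expects the
# 32x16-aligned size 128 * 112 * 3 = 43008 bytes; if the buffer is
# 112 * 112 * 3 = 37632 bytes instead, the splitter workaround above
# kicks in and the 16x16-aligned shape is used.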
class PiArrayOutput(io.BytesIO):
"""
Base class for capture arrays.
This class extends :class:`io.BytesIO` with a `numpy`_ array which is
intended to be filled when :meth:`~io.IOBase.flush` is called (i.e. at the
end of capture).
.. attribute:: array
After :meth:`~io.IOBase.flush` is called, this attribute contains the
frame's data as a multi-dimensional `numpy`_ array. This is typically
organized with the dimensions ``(rows, columns, plane)``. Hence, an
RGB image with dimensions *x* and *y* would produce an array with shape
``(y, x, 3)``.
"""
def __init__(self, camera, size=None):
super(PiArrayOutput, self).__init__()
self.camera = camera
self.size = size
self.array = None
def close(self):
super(PiArrayOutput, self).close()
self.array = None
def truncate(self, size=None):
"""
Resize the stream to the given size in bytes (or the current position
if size is not specified). This resizing can extend or reduce the
current file size. The new file size is returned.
In prior versions of picamera, truncation also changed the position of
the stream (because prior versions of these stream classes were
non-seekable). This functionality is now deprecated; scripts should
use :meth:`~io.IOBase.seek` and :meth:`truncate` as one would with
regular :class:`~io.BytesIO` instances.
"""
if size is not None:
warnings.warn(
PiCameraDeprecated(
'This method changes the position of the stream to the '
'truncated length; this is deprecated functionality and '
'you should not rely on it (seek before or after truncate '
'to ensure position is consistent)'))
super(PiArrayOutput, self).truncate(size)
if size is not None:
self.seek(size)
class PiRGBArray(PiArrayOutput):
"""
Produces a 3-dimensional RGB array from an RGB capture.
This custom output class can be used to easily obtain a 3-dimensional numpy
array, organized (rows, columns, colors), from an unencoded RGB capture.
The array is accessed via the :attr:`~PiArrayOutput.array` attribute. For
example::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as output:
camera.capture(output, 'rgb')
print('Captured %dx%d image' % (
output.array.shape[1], output.array.shape[0]))
You can re-use the output to produce multiple arrays by emptying it with
``truncate(0)`` between captures::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as output:
camera.resolution = (1280, 720)
camera.capture(output, 'rgb')
print('Captured %dx%d image' % (
output.array.shape[1], output.array.shape[0]))
output.truncate(0)
camera.resolution = (640, 480)
camera.capture(output, 'rgb')
print('Captured %dx%d image' % (
output.array.shape[1], output.array.shape[0]))
If you are using the GPU resizer when capturing (with the *resize*
parameter of the various :meth:`~PiCamera.capture` methods), specify the
resized resolution as the optional *size* parameter when constructing the
array output::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
camera.resolution = (1280, 720)
with picamera.array.PiRGBArray(camera, size=(640, 360)) as output:
camera.capture(output, 'rgb', resize=(640, 360))
print('Captured %dx%d image' % (
output.array.shape[1], output.array.shape[0]))
"""
def flush(self):
super(PiRGBArray, self).flush()
self.array = bytes_to_rgb(self.getvalue(), self.size or self.camera.resolution)
class PiYUVArray(PiArrayOutput):
"""
Produces 3-dimensional YUV & RGB arrays from a YUV capture.
This custom output class can be used to easily obtain a 3-dimensional numpy
array, organized (rows, columns, channel), from an unencoded YUV capture.
The array is accessed via the :attr:`~PiArrayOutput.array` attribute. For
example::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiYUVArray(camera) as output:
camera.capture(output, 'yuv')
print('Captured %dx%d image' % (
output.array.shape[1], output.array.shape[0]))
The :attr:`rgb_array` attribute can be queried for the equivalent RGB
array (conversion is performed using the `ITU-R BT.601`_ matrix)::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiYUVArray(camera) as output:
camera.resolution = (1280, 720)
camera.capture(output, 'yuv')
print(output.array.shape)
print(output.rgb_array.shape)
If you are using the GPU resizer when capturing (with the *resize*
parameter of the various :meth:`~picamera.PiCamera.capture` methods),
specify the resized resolution as the optional *size* parameter when
constructing the array output::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
camera.resolution = (1280, 720)
with picamera.array.PiYUVArray(camera, size=(640, 360)) as output:
camera.capture(output, 'yuv', resize=(640, 360))
print('Captured %dx%d image' % (
output.array.shape[1], output.array.shape[0]))
.. _ITU-R BT.601: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
"""
def __init__(self, camera, size=None):
super(PiYUVArray, self).__init__(camera, size)
self._rgb = None
def flush(self):
super(PiYUVArray, self).flush()
self.array = bytes_to_yuv(self.getvalue(), self.size or self.camera.resolution)
self._rgb = None
@property
def rgb_array(self):
if self._rgb is None:
# Apply the standard biases
YUV = self.array.astype(float)
YUV[:, :, 0] = YUV[:, :, 0] - 16 # Offset Y by 16
YUV[:, :, 1:] = YUV[:, :, 1:] - 128 # Offset UV by 128
# YUV conversion matrix from ITU-R BT.601 version (SDTV)
# Y U V
M = np.array([[1.164, 0.000, 1.596], # R
[1.164, -0.392, -0.813], # G
[1.164, 2.017, 0.000]]) # B
# Calculate the dot product with the matrix to produce RGB output,
# clamp the results to byte range and convert to bytes
self._rgb = YUV.dot(M.T).clip(0, 255).astype(np.uint8)
return self._rgb
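# --- Editor's note (illustrative, not part of upstream picamera) ---
# Spot-checking the BT.601 conversion in rgb_array: nominal black
# (Y, U, V) = (16, 128, 128) becomes (0, 0, 0) after the biases, and
# M.dot((0, 0, 0)) is RGB (0, 0, 0); nominal white (235, 128, 128)
# gives 1.164 * 219 = 254.9 per channel, stored as (254, 254, 254)
# after the clip and uint8 conversion.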
class BroadcomRawHeader(ct.Structure):
_fields_ = [
('name', ct.c_char * 32),
('width', ct.c_uint16),
('height', ct.c_uint16),
('padding_right', ct.c_uint16),
('padding_down', ct.c_uint16),
('dummy', ct.c_uint32 * 6),
('transform', ct.c_uint16),
('format', ct.c_uint16),
('bayer_order', ct.c_uint8),
('bayer_format', ct.c_uint8),
]
class PiBayerArray(PiArrayOutput):
"""
Produces a 3-dimensional RGB array from raw Bayer data.
This custom output class is intended to be used with the
:meth:`~picamera.PiCamera.capture` method, with the *bayer* parameter set
to ``True``, to include raw Bayer data in the JPEG output. The class
strips out the raw data, and constructs a numpy array from it. The
resulting data is accessed via the :attr:`~PiArrayOutput.array` attribute::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiBayerArray(camera) as output:
camera.capture(output, 'jpeg', bayer=True)
print(output.array.shape)
The *output_dims* parameter specifies whether the resulting array is
three-dimensional (the default, or when *output_dims* is 3), or
two-dimensional (when *output_dims* is 2). The three-dimensional data is
already separated into the three color planes, whilst the two-dimensional
variant is not (in which case you need to know the Bayer ordering to
accurately deal with the results).
.. note::
Bayer data is *usually* full resolution, so the resulting array usually
has the shape (1944, 2592, 3) with the V1 module, or (2464, 3280, 3)
with the V2 module (if two-dimensional output is requested the
3-layered color dimension is omitted). If the camera's
:attr:`~picamera.PiCamera.sensor_mode` has been forced to something
other than 0, then the output will be the native size for the requested
sensor mode.
This also implies that the optional *size* parameter (for specifying a
resizer resolution) is not available with this array class.
As the sensor records 10-bit values, the array uses the unsigned 16-bit
integer data type.
By default, `de-mosaicing`_ is **not** performed; if the resulting array is
viewed it will therefore appear dark and too green (due to the green bias
in the `Bayer pattern`_). A trivial weighted-average demosaicing algorithm
is provided in the :meth:`demosaic` method::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiBayerArray(camera) as output:
camera.capture(output, 'jpeg', bayer=True)
print(output.demosaic().shape)
Viewing the result of the de-mosaiced data will look more normal but still
considerably worse quality than the regular camera output (as none of the
other usual post-processing steps like auto-exposure, white-balance,
vignette compensation, and smoothing have been performed).
.. versionchanged:: 1.13
This class now supports the V2 module properly, and handles flipped
images, and forced sensor modes correctly.
.. _de-mosaicing: https://en.wikipedia.org/wiki/Demosaicing
.. _Bayer pattern: https://en.wikipedia.org/wiki/Bayer_filter
"""
BAYER_OFFSETS = {
0: ((0, 0), (1, 0), (0, 1), (1, 1)),
1: ((1, 0), (0, 0), (1, 1), (0, 1)),
2: ((1, 1), (0, 1), (1, 0), (0, 0)),
3: ((0, 1), (1, 1), (0, 0), (1, 0)),
}
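    # --- Editor's note (not part of upstream picamera) ---
    # Each entry above gives the (row, col) offsets of the red, the two
    # green, and the blue samples within a 2x2 Bayer cell. The keys 0-3
    # match the VC_IMAGE_BAYER_* enum in bcm_host.py: 0=RGGB, 1=GBRG,
    # 2=BGGR, 3=GRBG (e.g. order 0 puts red at (0, 0) and blue at (1, 1)).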
def __init__(self, camera, output_dims=3):
super(PiBayerArray, self).__init__(camera, size=None)
if not (2 <= output_dims <= 3):
raise PiCameraValueError('output_dims must be 2 or 3')
self._demo = None
self._header = None
self._output_dims = output_dims
@property
def output_dims(self):
return self._output_dims
def _to_3d(self, array):
array_3d = np.zeros(array.shape + (3,), dtype=array.dtype)
(
(ry, rx), (gy, gx), (Gy, Gx), (by, bx)
) = PiBayerArray.BAYER_OFFSETS[self._header.bayer_order]
array_3d[ry::2, rx::2, 0] = array[ry::2, rx::2] # Red
array_3d[gy::2, gx::2, 1] = array[gy::2, gx::2] # Green
array_3d[Gy::2, Gx::2, 1] = array[Gy::2, Gx::2] # Green
array_3d[by::2, bx::2, 2] = array[by::2, bx::2] # Blue
return array_3d
def flush(self):
super(PiBayerArray, self).flush()
self._demo = None
offset = {
'OV5647': {
0: 6404096,
1: 2717696,
2: 6404096,
3: 6404096,
4: 1625600,
5: 1233920,
6: 445440,
7: 445440,
},
'IMX219': {
0: 10270208,
1: 2678784,
2: 10270208,
3: 10270208,
4: 2628608,
5: 1963008,
6: 1233920,
7: 445440,
},
}[self.camera.revision.upper()][self.camera.sensor_mode]
data = self.getvalue()[-offset:]
if data[:4] != b'BRCM':
raise PiCameraValueError('Unable to locate Bayer data at end of buffer')
        # Extract the header (containing the Bayer order and other
        # interesting bits), which starts 176 bytes into the Bayer data,
        # and the pixel data, which starts 32768 bytes into the Bayer data
self._header = BroadcomRawHeader.from_buffer_copy(
data[176:176 + ct.sizeof(BroadcomRawHeader)])
data = np.frombuffer(data, dtype=np.uint8, offset=32768)
# Reshape and crop the data. The crop's width is multiplied by 5/4 to
# deal with the packed 10-bit format; the shape's width is calculated
# in a similar fashion but with padding included (which involves
# several additional padding steps)
crop = mo.PiResolution(
self._header.width * 5 // 4,
self._header.height)
shape = mo.PiResolution(
(((self._header.width + self._header.padding_right) * 5) + 3) // 4,
(self._header.height + self._header.padding_down)
).pad()
data = data.reshape((shape.height, shape.width))[:crop.height, :crop.width]
# Unpack 10-bit values; every 5 bytes contains the high 8-bits of 4
# values followed by the low 2-bits of 4 values packed into the fifth
# byte
data = data.astype(np.uint16) << 2
for byte in range(4):
            data[:, byte::5] |= ((data[:, 4::5] >> ((4 - byte) * 2)) & 3)
self.array = np.zeros(
(data.shape[0], data.shape[1] * 4 // 5), dtype=np.uint16)
for i in range(4):
self.array[:, i::4] = data[:, i::5]
if self.output_dims == 3:
self.array = self._to_3d(self.array)
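    # --- Editor's note (illustrative, not part of upstream picamera) ---
    # Worked example of the 10-bit unpacking in flush() above: for the
    # packed bytes (0xAB, 0xCD, 0xEF, 0x12, 0b11100100), the fifth byte
    # carries the low 2 bits of each value MSB-first, so the (4 - byte)
    # shift yields:
    #
    #     pixel 0 = (0xAB << 2) | 0b11 = 687
    #     pixel 1 = (0xCD << 2) | 0b10 = 822
    #     pixel 2 = (0xEF << 2) | 0b01 = 957
    #     pixel 3 = (0x12 << 2) | 0b00 = 72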
def demosaic(self):
"""
Perform a rudimentary `de-mosaic`_ of ``self.array``, returning the
result as a new array. The result of the demosaic is *always* three
dimensional, with the last dimension being the color planes (see
*output_dims* parameter on the constructor).
.. _de-mosaic: https://en.wikipedia.org/wiki/Demosaicing
"""
if self._demo is None:
# Construct 3D representation of Bayer data (if necessary)
if self.output_dims == 2:
array_3d = self._to_3d(self.array)
else:
array_3d = self.array
# Construct representation of the bayer pattern
bayer = np.zeros(array_3d.shape, dtype=np.uint8)
(
(ry, rx), (gy, gx), (Gy, Gx), (by, bx)
) = PiBayerArray.BAYER_OFFSETS[self._header.bayer_order]
bayer[ry::2, rx::2, 0] = 1 # Red
bayer[gy::2, gx::2, 1] = 1 # Green
bayer[Gy::2, Gx::2, 1] = 1 # Green
bayer[by::2, bx::2, 2] = 1 # Blue
# Allocate output array with same shape as data and set up some
# constants to represent the weighted average window
window = (3, 3)
borders = (window[0] - 1, window[1] - 1)
border = (borders[0] // 2, borders[1] // 2)
# Pad out the data and the bayer pattern (np.pad is faster but
# unavailable on the version of numpy shipped with Raspbian at the
# time of writing)
rgb = np.zeros((
array_3d.shape[0] + borders[0],
array_3d.shape[1] + borders[1],
array_3d.shape[2]), dtype=array_3d.dtype)
rgb[
border[0]:rgb.shape[0] - border[0],
border[1]:rgb.shape[1] - border[1],
:] = array_3d
bayer_pad = np.zeros((
array_3d.shape[0] + borders[0],
array_3d.shape[1] + borders[1],
array_3d.shape[2]), dtype=bayer.dtype)
bayer_pad[
border[0]:bayer_pad.shape[0] - border[0],
border[1]:bayer_pad.shape[1] - border[1],
:] = bayer
bayer = bayer_pad
# For each plane in the RGB data, construct a view over the plane
# of 3x3 matrices. Then do the same for the bayer array and use
# Einstein summation to get the weighted average
self._demo = np.empty(array_3d.shape, dtype=array_3d.dtype)
for plane in range(3):
p = rgb[..., plane]
b = bayer[..., plane]
pview = as_strided(p, shape=(
p.shape[0] - borders[0],
p.shape[1] - borders[1]) + window, strides=p.strides * 2)
bview = as_strided(b, shape=(
b.shape[0] - borders[0],
b.shape[1] - borders[1]) + window, strides=b.strides * 2)
psum = np.einsum('ijkl->ij', pview)
bsum = np.einsum('ijkl->ij', bview)
self._demo[..., plane] = psum // bsum
return self._demo
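    # --- Editor's note (not part of upstream picamera) ---
    # In demosaic() above, pview and bview are (rows, cols, 3, 3) sliding
    # window views, so np.einsum('ijkl->ij', ...) sums each 3x3 window.
    # Dividing psum by bsum therefore averages only the samples of the
    # relevant colour: e.g. a green output value at a red location is the
    # mean of the four neighbouring green samples.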
class PiMotionArray(PiArrayOutput):
"""
Produces a 3-dimensional array of motion vectors from the H.264 encoder.
This custom output class is intended to be used with the *motion_output*
parameter of the :meth:`~picamera.PiCamera.start_recording` method. Once
recording has finished, the class generates a 3-dimensional numpy array
organized as (frames, rows, columns) where ``rows`` and ``columns`` are the
number of rows and columns of `macro-blocks`_ (16x16 pixel blocks) in the
original frames. There is always one extra column of macro-blocks present
in motion vector data.
The data-type of the :attr:`~PiArrayOutput.array` is an (x, y, sad)
structure where ``x`` and ``y`` are signed 1-byte values, and ``sad`` is an
unsigned 2-byte value representing the `sum of absolute differences`_ of
the block. For example::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiMotionArray(camera) as output:
camera.resolution = (640, 480)
camera.start_recording(
'/dev/null', format='h264', motion_output=output)
camera.wait_recording(30)
camera.stop_recording()
print('Captured %d frames' % output.array.shape[0])
print('Frames are %dx%d blocks big' % (
output.array.shape[2], output.array.shape[1]))
If you are using the GPU resizer with your recording, use the optional
*size* parameter to specify the resizer's output resolution when
constructing the array::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
with picamera.array.PiMotionArray(camera, size=(320, 240)) as output:
camera.start_recording(
'/dev/null', format='h264', motion_output=output,
resize=(320, 240))
camera.wait_recording(30)
camera.stop_recording()
print('Captured %d frames' % output.array.shape[0])
print('Frames are %dx%d blocks big' % (
output.array.shape[2], output.array.shape[1]))
.. note::
This class is not suitable for real-time analysis of motion vector
data. See the :class:`PiMotionAnalysis` class instead.
.. _macro-blocks: https://en.wikipedia.org/wiki/Macroblock
.. _sum of absolute differences: https://en.wikipedia.org/wiki/Sum_of_absolute_differences
"""
def flush(self):
super(PiMotionArray, self).flush()
width, height = self.size or self.camera.resolution
cols = ((width + 15) // 16) + 1
rows = (height + 15) // 16
b = self.getvalue()
frames = len(b) // (cols * rows * motion_dtype.itemsize)
self.array = np.frombuffer(b, dtype=motion_dtype).reshape((frames, rows, cols))
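# --- Editor's note (illustrative, not part of upstream picamera) ---
# motion_dtype.itemsize is 4 (two int8 fields plus one uint16), so for
# a 640x480 recording PiMotionArray.flush() computes cols = 41 (the
# extra column included), rows = 30, and each frame occupies
# 41 * 30 * 4 = 4920 bytes of the stream.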
class PiAnalysisOutput(io.IOBase):
"""
Base class for analysis outputs.
This class extends :class:`io.IOBase` with a stub :meth:`analyze` method
which will be called for each frame output. In this base implementation the
method simply raises :exc:`NotImplementedError`.
"""
def __init__(self, camera, size=None):
super(PiAnalysisOutput, self).__init__()
self.camera = camera
self.size = size
def writable(self):
return True
def write(self, b):
return len(b)
def analyze(self, array):
"""
Stub method for users to override.
"""
try:
self.analyse(array)
warnings.warn(
PiCameraDeprecated(
'The analyse method is deprecated; use analyze (US '
'English spelling) instead'))
except NotImplementedError:
raise
def analyse(self, array):
"""
Deprecated alias of :meth:`analyze`.
"""
raise NotImplementedError
class PiRGBAnalysis(PiAnalysisOutput):
"""
Provides a basis for per-frame RGB analysis classes.
This custom output class is intended to be used with the
:meth:`~picamera.PiCamera.start_recording` method when it is called with
*format* set to ``'rgb'`` or ``'bgr'``. While recording is in progress, the
:meth:`~PiAnalysisOutput.write` method converts incoming frame data into a
numpy array and calls the stub :meth:`~PiAnalysisOutput.analyze` method
with the resulting array (this deliberately raises
:exc:`NotImplementedError` in this class; you must override it in your
descendent class).
.. note::
If your overridden :meth:`~PiAnalysisOutput.analyze` method runs slower
than the required framerate (e.g. 33.333ms when framerate is 30fps)
then the camera's effective framerate will be reduced. Furthermore,
this doesn't take into account the overhead of picamera itself so in
practice your method needs to be a bit faster still.
The array passed to :meth:`~PiAnalysisOutput.analyze` is organized as
(rows, columns, channel) where the channels 0, 1, and 2 are R, G, and B
respectively (or B, G, R if *format* is ``'bgr'``).
"""
def write(self, b):
result = super(PiRGBAnalysis, self).write(b)
self.analyze(bytes_to_rgb(b, self.size or self.camera.resolution))
return result
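# --- Editor's sketch (not part of upstream picamera) ---
# A minimal subclass, following the docstring above, that prints the
# mean red level of each frame:
#
#     class MeanRed(PiRGBAnalysis):
#         def analyze(self, a):
#             print(a[..., 0].mean())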
class PiYUVAnalysis(PiAnalysisOutput):
"""
Provides a basis for per-frame YUV analysis classes.
This custom output class is intended to be used with the
:meth:`~picamera.PiCamera.start_recording` method when it is called with
*format* set to ``'yuv'``. While recording is in progress, the
:meth:`~PiAnalysisOutput.write` method converts incoming frame data into a
numpy array and calls the stub :meth:`~PiAnalysisOutput.analyze` method
with the resulting array (this deliberately raises
:exc:`NotImplementedError` in this class; you must override it in your
descendent class).
.. note::
If your overridden :meth:`~PiAnalysisOutput.analyze` method runs slower
than the required framerate (e.g. 33.333ms when framerate is 30fps)
then the camera's effective framerate will be reduced. Furthermore,
this doesn't take into account the overhead of picamera itself so in
practice your method needs to be a bit faster still.
The array passed to :meth:`~PiAnalysisOutput.analyze` is organized as
(rows, columns, channel) where the channel 0 is Y (luminance), while 1 and
2 are U and V (chrominance) respectively. The chrominance values normally
have quarter resolution of the luminance values but this class makes all
channels equal resolution for ease of use.
"""
def write(self, b):
result = super(PiYUVAnalysis, self).write(b)
self.analyze(bytes_to_yuv(b, self.size or self.camera.resolution))
return result
class PiMotionAnalysis(PiAnalysisOutput):
"""
Provides a basis for real-time motion analysis classes.
This custom output class is intended to be used with the *motion_output*
parameter of the :meth:`~picamera.PiCamera.start_recording` method. While
recording is in progress, the write method converts incoming motion data
into numpy arrays and calls the stub :meth:`~PiAnalysisOutput.analyze`
method with the resulting array (which deliberately raises
:exc:`NotImplementedError` in this class).
.. note::
If your overridden :meth:`~PiAnalysisOutput.analyze` method runs slower
than the required framerate (e.g. 33.333ms when framerate is 30fps)
then the camera's effective framerate will be reduced. Furthermore,
this doesn't take into account the overhead of picamera itself so in
practice your method needs to be a bit faster still.
The array passed to :meth:`~PiAnalysisOutput.analyze` is organized as
(rows, columns) where ``rows`` and ``columns`` are the number of rows and
columns of `macro-blocks`_ (16x16 pixel blocks) in the original frames.
There is always one extra column of macro-blocks present in motion vector
data.
The data-type of the array is an (x, y, sad) structure where ``x`` and
``y`` are signed 1-byte values, and ``sad`` is an unsigned 2-byte value
representing the `sum of absolute differences`_ of the block.
An example of a crude motion detector is given below::
import numpy as np
import picamera
import picamera.array
class DetectMotion(picamera.array.PiMotionAnalysis):
def analyze(self, a):
a = np.sqrt(
np.square(a['x'].astype(np.float)) +
np.square(a['y'].astype(np.float))
).clip(0, 255).astype(np.uint8)
# If there're more than 10 vectors with a magnitude greater
# than 60, then say we've detected motion
if (a > 60).sum() > 10:
print('Motion detected!')
with picamera.PiCamera() as camera:
with DetectMotion(camera) as output:
camera.resolution = (640, 480)
camera.start_recording(
'/dev/null', format='h264', motion_output=output)
camera.wait_recording(30)
camera.stop_recording()
You can use the optional *size* parameter to specify the output resolution
of the GPU resizer, if you are using the *resize* parameter of
:meth:`~picamera.PiCamera.start_recording`.
"""
def __init__(self, camera, size=None):
super(PiMotionAnalysis, self).__init__(camera, size)
self.cols = None
self.rows = None
def write(self, b):
result = super(PiMotionAnalysis, self).write(b)
if self.cols is None:
width, height = self.size or self.camera.resolution
self.cols = ((width + 15) // 16) + 1
self.rows = (height + 15) // 16
self.analyze(
np.frombuffer(b, dtype=motion_dtype).\
reshape((self.rows, self.cols)))
return result
class MMALArrayBuffer(mo.MMALBuffer):
__slots__ = ('_shape',)
def __init__(self, port, buf):
super(MMALArrayBuffer, self).__init__(buf)
width = port._format[0].es[0].video.width
height = port._format[0].es[0].video.height
bpp = self.size // (width * height)
self.offset = 0
self.length = width * height * bpp
self._shape = (height, width, bpp)
def __enter__(self):
mmal_check(
mmal.mmal_buffer_header_mem_lock(self._buf),
prefix='unable to lock buffer header memory')
assert self.offset == 0
return np.frombuffer(
ct.cast(
self._buf[0].data,
ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents,
dtype=np.uint8, count=self.length).reshape(self._shape)
def __exit__(self, *exc):
mmal.mmal_buffer_header_mem_unlock(self._buf)
return False
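# --- Editor's note (not part of upstream picamera) ---
# MMALArrayBuffer infers bytes-per-pixel from the buffer size: a
# 640x480 RGB24 buffer of 921600 bytes gives bpp == 3, and entering the
# context manager yields a (480, 640, 3) uint8 view of the locked
# buffer memory.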
class PiArrayTransform(mo.MMALPythonComponent):
"""
A derivative of :class:`~picamera.mmalobj.MMALPythonComponent` which eases
the construction of custom MMAL transforms by representing buffer data as
numpy arrays. The *formats* parameter specifies the accepted input
formats as a sequence of strings (default: 'rgb', 'bgr', 'rgba', 'bgra').
Override the :meth:`transform` method to modify buffers sent to the
component, then place it in your MMAL pipeline as you would a normal
encoder.
"""
__slots__ = ()
def __init__(self, formats=('rgb', 'bgr', 'rgba', 'bgra')):
super(PiArrayTransform, self).__init__()
if isinstance(formats, bytes):
formats = formats.decode('ascii')
if isinstance(formats, str):
formats = (formats,)
try:
formats = {
{
'rgb': mmal.MMAL_ENCODING_RGB24,
'bgr': mmal.MMAL_ENCODING_BGR24,
'rgba': mmal.MMAL_ENCODING_RGBA,
'bgra': mmal.MMAL_ENCODING_BGRA,
}[fmt]
for fmt in formats
}
except KeyError as e:
raise PiCameraValueError(
'PiArrayTransform cannot handle format %s' % str(e))
self.inputs[0].supported_formats = formats
self.outputs[0].supported_formats = formats
def _callback(self, port, source_buf):
try:
target_buf = self.outputs[0].get_buffer(False)
except PiCameraPortDisabled:
return False
if target_buf:
target_buf.copy_meta(source_buf)
result = self.transform(
MMALArrayBuffer(port, source_buf._buf),
MMALArrayBuffer(self.outputs[0], target_buf._buf))
try:
self.outputs[0].send_buffer(target_buf)
except PiCameraPortDisabled:
return False
return False
def transform(self, source, target):
"""
This method will be called for every frame passing through the
transform. The *source* and *target* parameters represent buffers from
the input and output ports of the transform respectively. They will be
derivatives of :class:`~picamera.mmalobj.MMALBuffer` which return a
3-dimensional numpy array when used as context managers. For example::
def transform(self, source, target):
with source as source_array, target as target_array:
# Copy the source array data to the target
target_array[...] = source_array
# Draw a box around the edges
target_array[0, :, :] = 0xff
target_array[-1, :, :] = 0xff
target_array[:, 0, :] = 0xff
target_array[:, -1, :] = 0xff
return False
The target buffer's meta-data starts out as a copy of the source
buffer's meta-data, but the target buffer's data starts out
uninitialized.
"""
return False
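    # --- Editor's sketch (not part of upstream picamera) ---
    # A minimal transform, following the pattern in the docstring above,
    # that inverts every frame passing through the component:
    #
    #     class Invert(PiArrayTransform):
    #         def transform(self, source, target):
    #             with source as src, target as tgt:
    #                 tgt[...] = 255 - src
    #             return False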

picamera/bcm_host.py (new file, +991)

@@ -0,0 +1,991 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import ctypes as ct
import warnings
_lib = ct.CDLL('libbcm_host.so')
# bcm_host.h #################################################################
bcm_host_init = _lib.bcm_host_init
bcm_host_init.argtypes = []
bcm_host_init.restype = None
bcm_host_deinit = _lib.bcm_host_deinit
bcm_host_deinit.argtypes = []
bcm_host_deinit.restype = None
graphics_get_display_size = _lib.graphics_get_display_size
graphics_get_display_size.argtypes = [ct.c_uint16, ct.POINTER(ct.c_uint32), ct.POINTER(ct.c_uint32)]
graphics_get_display_size.restype = ct.c_int32
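# --- Editor's sketch (not part of the original header conversion) ---
# Using the prototype above on an actual Raspberry Pi; 0 is
# conventionally the primary display, and a negative return value
# indicates failure:
#
#     >>> bcm_host_init()
#     >>> w, h = ct.c_uint32(), ct.c_uint32()
#     >>> graphics_get_display_size(0, ct.byref(w), ct.byref(h)) >= 0
#     True
#     >>> (w.value, h.value)      # e.g. (1920, 1080)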
# vchi.h #####################################################################
VCHI_INSTANCE_T = ct.c_void_p
VCHI_CONNECTION_T = ct.c_void_p
# vcos_platform.h ############################################################
VCOS_UNSIGNED = ct.c_uint32
# vcos_types.h ###############################################################
VCOS_STATUS_T = ct.c_uint32 # enum
(
VCOS_SUCCESS,
VCOS_EAGAIN,
VCOS_ENOENT,
VCOS_ENOSPC,
VCOS_EINVAL,
VCOS_EACCESS,
VCOS_ENOMEM,
VCOS_ENOSYS,
VCOS_EEXIST,
VCOS_ENXIO,
VCOS_EINTR,
) = range(11)
vcos_bool_t = ct.c_int32
vcos_fourcc_t = ct.c_int32
def VCOS_ALIGN_UP(value, round_to):
# Note: this function assumes round_to is some power of 2.
return (value + (round_to - 1)) & ~(round_to - 1)
def VCOS_ALIGN_DOWN(value, round_to):
# Note: this function assumes round_to is some power of 2.
return value & ~(round_to - 1)
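# --- Editor's note (illustrative, not part of the original headers) ---
# Both helpers rely on round_to being a power of two:
#
#     >>> VCOS_ALIGN_UP(100, 16)
#     112
#     >>> VCOS_ALIGN_DOWN(100, 16)
#     96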
# vc_image_types.h ###########################################################
class VC_RECT_T(ct.Structure):
_fields_ = [
('x', ct.c_int32),
('y', ct.c_int32),
('width', ct.c_int32),
('height', ct.c_int32),
]
VC_IMAGE_TYPE_T = ct.c_uint32 # enum
(
VC_IMAGE_MIN,
VC_IMAGE_RGB565,
VC_IMAGE_1BPP,
VC_IMAGE_YUV420,
VC_IMAGE_48BPP,
VC_IMAGE_RGB888,
VC_IMAGE_8BPP,
VC_IMAGE_4BPP,
VC_IMAGE_3D32,
VC_IMAGE_3D32B,
VC_IMAGE_3D32MAT,
VC_IMAGE_RGB2X9,
VC_IMAGE_RGB666,
VC_IMAGE_PAL4_OBSOLETE,
VC_IMAGE_PAL8_OBSOLETE,
VC_IMAGE_RGBA32,
VC_IMAGE_YUV422,
VC_IMAGE_RGBA565,
VC_IMAGE_RGBA16,
VC_IMAGE_YUV_UV,
VC_IMAGE_TF_RGBA32,
VC_IMAGE_TF_RGBX32,
VC_IMAGE_TF_FLOAT,
VC_IMAGE_TF_RGBA16,
VC_IMAGE_TF_RGBA5551,
VC_IMAGE_TF_RGB565,
VC_IMAGE_TF_YA88,
VC_IMAGE_TF_BYTE,
VC_IMAGE_TF_PAL8,
VC_IMAGE_TF_PAL4,
VC_IMAGE_TF_ETC1,
VC_IMAGE_BGR888,
VC_IMAGE_BGR888_NP,
VC_IMAGE_BAYER,
VC_IMAGE_CODEC,
VC_IMAGE_YUV_UV32,
VC_IMAGE_TF_Y8,
VC_IMAGE_TF_A8,
VC_IMAGE_TF_SHORT,
VC_IMAGE_TF_1BPP,
VC_IMAGE_OPENGL,
VC_IMAGE_YUV444I,
VC_IMAGE_YUV422PLANAR,
VC_IMAGE_ARGB8888,
VC_IMAGE_XRGB8888,
VC_IMAGE_YUV422YUYV,
VC_IMAGE_YUV422YVYU,
VC_IMAGE_YUV422UYVY,
VC_IMAGE_YUV422VYUY,
VC_IMAGE_RGBX32,
VC_IMAGE_RGBX8888,
VC_IMAGE_BGRX8888,
VC_IMAGE_YUV420SP,
VC_IMAGE_YUV444PLANAR,
VC_IMAGE_TF_U8,
VC_IMAGE_TF_V8,
VC_IMAGE_MAX,
) = range(57)
TRANSFORM_HFLIP = 1 << 0
TRANSFORM_VFLIP = 1 << 1
TRANSFORM_TRANSPOSE = 1 << 2
VC_IMAGE_TRANSFORM_T = ct.c_uint32 # enum
VC_IMAGE_ROT0 = 0
VC_IMAGE_MIRROR_ROT0 = TRANSFORM_HFLIP
VC_IMAGE_MIRROR_ROT180 = TRANSFORM_VFLIP
VC_IMAGE_ROT180 = TRANSFORM_HFLIP | TRANSFORM_VFLIP
VC_IMAGE_MIRROR_ROT90 = TRANSFORM_TRANSPOSE
VC_IMAGE_ROT270 = TRANSFORM_TRANSPOSE | TRANSFORM_HFLIP
VC_IMAGE_ROT90 = TRANSFORM_TRANSPOSE | TRANSFORM_VFLIP
VC_IMAGE_MIRROR_ROT270 = TRANSFORM_TRANSPOSE | TRANSFORM_HFLIP | TRANSFORM_VFLIP
VC_IMAGE_BAYER_ORDER_T = ct.c_uint32 # enum
(
VC_IMAGE_BAYER_RGGB,
VC_IMAGE_BAYER_GBRG,
VC_IMAGE_BAYER_BGGR,
VC_IMAGE_BAYER_GRBG,
) = range(4)
VC_IMAGE_BAYER_FORMAT_T = ct.c_uint32 # enum
(
VC_IMAGE_BAYER_RAW6,
VC_IMAGE_BAYER_RAW7,
VC_IMAGE_BAYER_RAW8,
VC_IMAGE_BAYER_RAW10,
VC_IMAGE_BAYER_RAW12,
VC_IMAGE_BAYER_RAW14,
VC_IMAGE_BAYER_RAW16,
VC_IMAGE_BAYER_RAW10_8,
VC_IMAGE_BAYER_RAW12_8,
VC_IMAGE_BAYER_RAW14_8,
VC_IMAGE_BAYER_RAW10L,
VC_IMAGE_BAYER_RAW12L,
VC_IMAGE_BAYER_RAW14L,
VC_IMAGE_BAYER_RAW16_BIG_ENDIAN,
VC_IMAGE_BAYER_RAW4,
) = range(15)
# vc_display_types.h #########################################################
VCOS_DISPLAY_INPUT_FORMAT_T = ct.c_uint32 # enum
(
VCOS_DISPLAY_INPUT_FORMAT_INVALID,
VCOS_DISPLAY_INPUT_FORMAT_RGB888,
VCOS_DISPLAY_INPUT_FORMAT_RGB565
) = range(3)
DISPLAY_INPUT_FORMAT_INVALID = VCOS_DISPLAY_INPUT_FORMAT_INVALID
DISPLAY_INPUT_FORMAT_RGB888 = VCOS_DISPLAY_INPUT_FORMAT_RGB888
DISPLAY_INPUT_FORMAT_RGB565 = VCOS_DISPLAY_INPUT_FORMAT_RGB565
DISPLAY_INPUT_FORMAT_T = VCOS_DISPLAY_INPUT_FORMAT_T
DISPLAY_3D_FORMAT_T = ct.c_uint32 # enum
(
DISPLAY_3D_UNSUPPORTED,
DISPLAY_3D_INTERLEAVED,
DISPLAY_3D_SBS_FULL_AUTO,
DISPLAY_3D_SBS_HALF_HORIZ,
DISPLAY_3D_TB_HALF,
DISPLAY_3D_FRAME_PACKING,
DISPLAY_3D_FRAME_SEQUENTIAL,
DISPLAY_3D_FORMAT_MAX,
) = range(8)
DISPLAY_INTERFACE_T = ct.c_uint32 # enum
(
DISPLAY_INTERFACE_MIN,
DISPLAY_INTERFACE_SMI,
DISPLAY_INTERFACE_DPI,
DISPLAY_INTERFACE_DSI,
DISPLAY_INTERFACE_LVDS,
DISPLAY_INTERFACE_MAX,
) = range(6)
DISPLAY_DITHER_T = ct.c_uint32 # enum
(
DISPLAY_DITHER_NONE,
DISPLAY_DITHER_RGB666,
DISPLAY_DITHER_RGB565,
DISPLAY_DITHER_RGB555,
DISPLAY_DITHER_MAX,
) = range(5)
class DISPLAY_INFO_T(ct.Structure):
_fields_ = [
('type', DISPLAY_INTERFACE_T),
('width', ct.c_uint32),
('height', ct.c_uint32),
('input_format', DISPLAY_INPUT_FORMAT_T),
('interlaced', ct.c_uint32),
('output_dither', DISPLAY_DITHER_T),
('pixel_freq', ct.c_uint32),
('line_rate', ct.c_uint32),
('format_3d', DISPLAY_3D_FORMAT_T),
('use_pixelvalve_1', ct.c_uint32),
('dsi_video_mode', ct.c_uint32),
('hvs_channel', ct.c_uint32),
]
# vc_dispmanx_types.h ########################################################
DISPMANX_DISPLAY_HANDLE_T = ct.c_uint32
DISPMANX_UPDATE_HANDLE_T = ct.c_uint32
DISPMANX_ELEMENT_HANDLE_T = ct.c_uint32
DISPMANX_RESOURCE_HANDLE_T = ct.c_uint32
DISPMANX_PROTECTION_T = ct.c_uint32
DISPMANX_TRANSFORM_T = ct.c_uint32 # enum
DISPMANX_NO_ROTATE = 0
DISPMANX_ROTATE_90 = 1
DISPMANX_ROTATE_180 = 2
DISPMANX_ROTATE_270 = 3
DISPMANX_FLIP_HRIZ = 1 << 16
DISPMANX_FLIP_VERT = 1 << 17
DISPMANX_STEREOSCOPIC_INVERT = 1 << 19
DISPMANX_STEREOSCOPIC_NONE = 0 << 20
DISPMANX_STEREOSCOPIC_MONO = 1 << 20
DISPMANX_STEREOSCOPIC_SBS = 2 << 20
DISPMANX_STEREOSCOPIC_TB = 3 << 20
DISPMANX_STEREOSCOPIC_MASK = 15 << 20
DISPMANX_SNAPSHOT_NO_YUV = 1 << 24
DISPMANX_SNAPSHOT_NO_RGB = 1 << 25
DISPMANX_SNAPSHOT_FILL = 1 << 26
DISPMANX_SNAPSHOT_SWAP_RED_BLUE = 1 << 27
DISPMANX_SNAPSHOT_PACK = 1 << 28
DISPMANX_FLAGS_ALPHA_T = ct.c_uint32 # enum
DISPMANX_FLAGS_ALPHA_FROM_SOURCE = 0
DISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS = 1
DISPMANX_FLAGS_ALPHA_FIXED_NON_ZERO = 2
DISPMANX_FLAGS_ALPHA_FIXED_EXCEED_0X07 = 3
DISPMANX_FLAGS_ALPHA_PREMULT = 1 << 16
DISPMANX_FLAGS_ALPHA_MIX = 1 << 17
class DISPMANX_ALPHA_T(ct.Structure):
_fields_ = [
('flags', DISPMANX_FLAGS_ALPHA_T),
('opacity', ct.c_uint32),
('mask', ct.c_void_p),
]
class VC_DISPMANX_ALPHA_T(ct.Structure):
_fields_ = [
('flags', DISPMANX_FLAGS_ALPHA_T),
('opacity', ct.c_uint32),
('mask', DISPMANX_RESOURCE_HANDLE_T),
]
DISPMANX_FLAGS_CLAMP_T = ct.c_uint32 # enum
(
DISPMANX_FLAGS_CLAMP_NONE,
DISPMANX_FLAGS_CLAMP_LUMA_TRANSPARENT,
DISPMANX_FLAGS_CLAMP_TRANSPARENT,
DISPMANX_FLAGS_CLAMP_REPLACE,
) = range(4)
DISPMANX_FLAGS_KEYMASK_T = ct.c_uint32 # enum
DISPMANX_FLAGS_KEYMASK_OVERRIDE = 1
DISPMANX_FLAGS_KEYMASK_SMOOTH = 1 << 1
DISPMANX_FLAGS_KEYMASK_CR_INV = 1 << 2
DISPMANX_FLAGS_KEYMASK_CB_INV = 1 << 3
DISPMANX_FLAGS_KEYMASK_YY_INV = 1 << 4
class _YUV(ct.Structure):
_fields_ = [
('yy_upper', ct.c_uint8),
('yy_lower', ct.c_uint8),
('cr_upper', ct.c_uint8),
('cr_lower', ct.c_uint8),
('cb_upper', ct.c_uint8),
('cb_lower', ct.c_uint8),
]
class _RGB(ct.Structure):
_fields_ = [
('red_upper', ct.c_uint8),
('red_lower', ct.c_uint8),
('green_upper', ct.c_uint8),
('green_lower', ct.c_uint8),
('blue_upper', ct.c_uint8),
('blue_lower', ct.c_uint8),
]
class DISPMANX_CLAMP_KEYS_T(ct.Union):
_fields_ = [
('yuv', _YUV),
('rgb', _RGB),
]
class DISPMANX_CLAMP_T(ct.Structure):
_fields_ = [
('mode', DISPMANX_FLAGS_CLAMP_T),
('key_mask', DISPMANX_FLAGS_KEYMASK_T),
('key_value', DISPMANX_CLAMP_KEYS_T),
('replace_value', ct.c_uint32),
]
class DISPMANX_MODEINFO_T(ct.Structure):
_fields_ = [
('width', ct.c_int32),
('height', ct.c_int32),
('transform', DISPMANX_TRANSFORM_T),
('input_format', DISPLAY_INPUT_FORMAT_T),
('display_num', ct.c_uint32),
]
DISPMANX_CALLBACK_FUNC_T = ct.CFUNCTYPE(
None,
DISPMANX_UPDATE_HANDLE_T, ct.c_void_p)
DISPMANX_PROGRESS_CALLBACK_FUNC_T = ct.CFUNCTYPE(
None,
DISPMANX_UPDATE_HANDLE_T, ct.c_uint32, ct.c_void_p)
# vc_dispmanx.h ##############################################################
vc_dispmanx_stop = _lib.vc_dispmanx_stop
vc_dispmanx_stop.argtypes = []
vc_dispmanx_stop.restype = None
vc_dispmanx_rect_set = _lib.vc_dispmanx_rect_set
vc_dispmanx_rect_set.argtypes = [ct.POINTER(VC_RECT_T), ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32]
vc_dispmanx_rect_set.restype = ct.c_int
vc_dispmanx_resource_create = _lib.vc_dispmanx_resource_create
vc_dispmanx_resource_create.argtypes = [VC_IMAGE_TYPE_T, ct.c_uint32, ct.c_uint32, ct.POINTER(ct.c_uint32)]
vc_dispmanx_resource_create.restype = DISPMANX_RESOURCE_HANDLE_T
vc_dispmanx_resource_write_data = _lib.vc_dispmanx_resource_write_data
vc_dispmanx_resource_write_data.argtypes = [DISPMANX_RESOURCE_HANDLE_T, VC_IMAGE_TYPE_T, ct.c_int, ct.c_void_p, ct.POINTER(VC_RECT_T)]
vc_dispmanx_resource_write_data.restype = ct.c_int
vc_dispmanx_resource_read_data = _lib.vc_dispmanx_resource_read_data
vc_dispmanx_resource_read_data.argtypes = [DISPMANX_RESOURCE_HANDLE_T, ct.POINTER(VC_RECT_T), ct.c_void_p, ct.c_uint32]
vc_dispmanx_resource_read_data.restype = ct.c_int
vc_dispmanx_resource_delete = _lib.vc_dispmanx_resource_delete
vc_dispmanx_resource_delete.argtypes = [DISPMANX_RESOURCE_HANDLE_T]
vc_dispmanx_resource_delete.restype = ct.c_int
vc_dispmanx_display_open = _lib.vc_dispmanx_display_open
vc_dispmanx_display_open.argtypes = [ct.c_uint32]
vc_dispmanx_display_open.restype = DISPMANX_DISPLAY_HANDLE_T
vc_dispmanx_display_open_mode = _lib.vc_dispmanx_display_open_mode
vc_dispmanx_display_open_mode.argtypes = [ct.c_uint32, ct.c_uint32]
vc_dispmanx_display_open_mode.restype = DISPMANX_DISPLAY_HANDLE_T
vc_dispmanx_display_open_offscreen = _lib.vc_dispmanx_display_open_offscreen
vc_dispmanx_display_open_offscreen.argtypes = [DISPMANX_RESOURCE_HANDLE_T, DISPMANX_TRANSFORM_T]
vc_dispmanx_display_open_offscreen.restype = DISPMANX_DISPLAY_HANDLE_T
vc_dispmanx_display_reconfigure = _lib.vc_dispmanx_display_reconfigure
vc_dispmanx_display_reconfigure.argtypes = [DISPMANX_DISPLAY_HANDLE_T, ct.c_uint32]
vc_dispmanx_display_reconfigure.restype = ct.c_int
vc_dispmanx_display_set_destination = _lib.vc_dispmanx_display_set_destination
vc_dispmanx_display_set_destination.argtypes = [DISPMANX_DISPLAY_HANDLE_T, DISPMANX_RESOURCE_HANDLE_T]
vc_dispmanx_display_set_destination.restype = ct.c_int
vc_dispmanx_display_set_background = _lib.vc_dispmanx_display_set_background
vc_dispmanx_display_set_background.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_DISPLAY_HANDLE_T, ct.c_uint8, ct.c_uint8, ct.c_uint8]
vc_dispmanx_display_set_background.restype = ct.c_int
vc_dispmanx_display_get_info = _lib.vc_dispmanx_display_get_info
vc_dispmanx_display_get_info.argtypes = [DISPMANX_DISPLAY_HANDLE_T, ct.POINTER(DISPMANX_MODEINFO_T)]
vc_dispmanx_display_get_info.restype = ct.c_int
vc_dispmanx_display_close = _lib.vc_dispmanx_display_close
vc_dispmanx_display_close.argtypes = [DISPMANX_DISPLAY_HANDLE_T]
vc_dispmanx_display_close.restype = ct.c_int
vc_dispmanx_update_start = _lib.vc_dispmanx_update_start
vc_dispmanx_update_start.argtypes = [ct.c_int32]
vc_dispmanx_update_start.restype = DISPMANX_UPDATE_HANDLE_T
vc_dispmanx_element_add = _lib.vc_dispmanx_element_add
vc_dispmanx_element_add.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_DISPLAY_HANDLE_T, ct.c_int32, ct.POINTER(VC_RECT_T), DISPMANX_RESOURCE_HANDLE_T, ct.POINTER(VC_RECT_T), DISPMANX_PROTECTION_T, VC_DISPMANX_ALPHA_T, DISPMANX_CLAMP_T, DISPMANX_TRANSFORM_T]
vc_dispmanx_element_add.restype = DISPMANX_ELEMENT_HANDLE_T
vc_dispmanx_element_change_source = _lib.vc_dispmanx_element_change_source
vc_dispmanx_element_change_source.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, DISPMANX_RESOURCE_HANDLE_T]
vc_dispmanx_element_change_source.restype = ct.c_int
vc_dispmanx_element_change_layer = _lib.vc_dispmanx_element_change_layer
vc_dispmanx_element_change_layer.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, ct.c_int32]
vc_dispmanx_element_change_layer.restype = ct.c_int
vc_dispmanx_element_modified = _lib.vc_dispmanx_element_modified
vc_dispmanx_element_modified.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, ct.POINTER(VC_RECT_T)]
vc_dispmanx_element_modified.restype = ct.c_int
vc_dispmanx_element_remove = _lib.vc_dispmanx_element_remove
vc_dispmanx_element_remove.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T]
vc_dispmanx_element_remove.restype = ct.c_int
vc_dispmanx_update_submit = _lib.vc_dispmanx_update_submit
vc_dispmanx_update_submit.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_CALLBACK_FUNC_T, ct.c_void_p]
vc_dispmanx_update_submit.restype = ct.c_int
vc_dispmanx_update_submit_sync = _lib.vc_dispmanx_update_submit_sync
vc_dispmanx_update_submit_sync.argtypes = [DISPMANX_UPDATE_HANDLE_T]
vc_dispmanx_update_submit_sync.restype = ct.c_int
vc_dispmanx_query_image_formats = _lib.vc_dispmanx_query_image_formats
vc_dispmanx_query_image_formats.argtypes = [ct.POINTER(ct.c_uint32)]
vc_dispmanx_query_image_formats.restype = ct.c_int
vc_dispmanx_element_change_attributes = _lib.vc_dispmanx_element_change_attributes
vc_dispmanx_element_change_attributes.argtypes = [DISPMANX_UPDATE_HANDLE_T, DISPMANX_ELEMENT_HANDLE_T, ct.c_uint32, ct.c_int32, ct.c_uint8, ct.POINTER(VC_RECT_T), ct.POINTER(VC_RECT_T), DISPMANX_RESOURCE_HANDLE_T, DISPMANX_TRANSFORM_T]
vc_dispmanx_element_change_attributes.restype = ct.c_int
vc_vchi_dispmanx_init = _lib.vc_vchi_dispmanx_init
vc_vchi_dispmanx_init.argtypes = [VCHI_INSTANCE_T, ct.POINTER(VCHI_CONNECTION_T), ct.c_uint32]
vc_vchi_dispmanx_init.restype = None
vc_dispmanx_snapshot = _lib.vc_dispmanx_snapshot
vc_dispmanx_snapshot.argtypes = [DISPMANX_DISPLAY_HANDLE_T, DISPMANX_RESOURCE_HANDLE_T, DISPMANX_TRANSFORM_T]
vc_dispmanx_snapshot.restype = ct.c_int
vc_dispmanx_resource_set_palette = _lib.vc_dispmanx_resource_set_palette
vc_dispmanx_resource_set_palette.argtypes = [DISPMANX_RESOURCE_HANDLE_T, ct.c_void_p, ct.c_int, ct.c_int]
vc_dispmanx_resource_set_palette.restype = ct.c_int
vc_dispmanx_vsync_callback = _lib.vc_dispmanx_vsync_callback
vc_dispmanx_vsync_callback.argtypes = [DISPMANX_DISPLAY_HANDLE_T, DISPMANX_CALLBACK_FUNC_T, ct.c_void_p]
vc_dispmanx_vsync_callback.restype = ct.c_int
# vc_cec.h ###################################################################
CEC_BROADCAST_ADDR = 0xF
CEC_TV_ADDRESS = 0
CEC_MAX_XMIT_LENGTH = 15
CEC_CLEAR_ADDR = 0xFFFF
CEC_VERSION = 0x4
CEC_VENDOR_ID_BROADCOM = 0x18C086
CEC_VENDOR_ID_ONKYO = 0x0009B0
CEC_VENDOR_ID_PANASONIC_EUROPE = 0x000F12
CEC_VENDOR_ID = 0
CEC_BLOCKING = 1
CEC_NONBLOCKING = 0
CEC_AllDevices_T = ct.c_uint32 # enum
(
CEC_AllDevices_eTV,
CEC_AllDevices_eRec1,
CEC_AllDevices_eRec2,
CEC_AllDevices_eSTB1,
CEC_AllDevices_eDVD1,
CEC_AllDevices_eAudioSystem,
CEC_AllDevices_eSTB2,
CEC_AllDevices_eSTB3,
CEC_AllDevices_eDVD2,
CEC_AllDevices_eRec3,
CEC_AllDevices_eSTB4,
CEC_AllDevices_eDVD3,
CEC_AllDevices_eRsvd3,
CEC_AllDevices_eRsvd4,
CEC_AllDevices_eFreeUse,
CEC_AllDevices_eUnRegistered,
) = range(16)
CEC_DEVICE_TYPE_T = ct.c_uint32 # enum
(
CEC_DeviceType_TV,
CEC_DeviceType_Rec,
CEC_DeviceType_Reserved,
CEC_DeviceType_Tuner,
CEC_DeviceType_Playback,
CEC_DeviceType_Audio,
CEC_DeviceType_Switch,
CEC_DeviceType_VidProc,
) = range(8)
CEC_DeviceType_Invalid = 0xF
CEC_OPCODE_T = ct.c_uint32 # enum
CEC_Opcode_FeatureAbort = 0x00
CEC_Opcode_ImageViewOn = 0x04
CEC_Opcode_TunerStepIncrement = 0x05
CEC_Opcode_TunerStepDecrement = 0x06
CEC_Opcode_TunerDeviceStatus = 0x07
CEC_Opcode_GiveTunerDeviceStatus = 0x08
CEC_Opcode_RecordOn = 0x09
CEC_Opcode_RecordStatus = 0x0A
CEC_Opcode_RecordOff = 0x0B
CEC_Opcode_TextViewOn = 0x0D
CEC_Opcode_RecordTVScreen = 0x0F
CEC_Opcode_GiveDeckStatus = 0x1A
CEC_Opcode_DeckStatus = 0x1B
CEC_Opcode_SetMenuLanguage = 0x32
CEC_Opcode_ClearAnalogTimer = 0x33
CEC_Opcode_SetAnalogTimer = 0x34
CEC_Opcode_TimerStatus = 0x35
CEC_Opcode_Standby = 0x36
CEC_Opcode_Play = 0x41
CEC_Opcode_DeckControl = 0x42
CEC_Opcode_TimerClearedStatus = 0x43
CEC_Opcode_UserControlPressed = 0x44
CEC_Opcode_UserControlReleased = 0x45
CEC_Opcode_GiveOSDName = 0x46
CEC_Opcode_SetOSDName = 0x47
CEC_Opcode_SetOSDString = 0x64
CEC_Opcode_SetTimerProgramTitle = 0x67
CEC_Opcode_SystemAudioModeRequest = 0x70
CEC_Opcode_GiveAudioStatus = 0x71
CEC_Opcode_SetSystemAudioMode = 0x72
CEC_Opcode_ReportAudioStatus = 0x7A
CEC_Opcode_GiveSystemAudioModeStatus = 0x7D
CEC_Opcode_SystemAudioModeStatus = 0x7E
CEC_Opcode_RoutingChange = 0x80
CEC_Opcode_RoutingInformation = 0x81
CEC_Opcode_ActiveSource = 0x82
CEC_Opcode_GivePhysicalAddress = 0x83
CEC_Opcode_ReportPhysicalAddress = 0x84
CEC_Opcode_RequestActiveSource = 0x85
CEC_Opcode_SetStreamPath = 0x86
CEC_Opcode_DeviceVendorID = 0x87
CEC_Opcode_VendorCommand = 0x89
CEC_Opcode_VendorRemoteButtonDown = 0x8A
CEC_Opcode_VendorRemoteButtonUp = 0x8B
CEC_Opcode_GiveDeviceVendorID = 0x8C
CEC_Opcode_MenuRequest = 0x8D
CEC_Opcode_MenuStatus = 0x8E
CEC_Opcode_GiveDevicePowerStatus = 0x8F
CEC_Opcode_ReportPowerStatus = 0x90
CEC_Opcode_GetMenuLanguage = 0x91
CEC_Opcode_SelectAnalogService = 0x92
CEC_Opcode_SelectDigitalService = 0x93
CEC_Opcode_SetDigitalTimer = 0x97
CEC_Opcode_ClearDigitalTimer = 0x99
CEC_Opcode_SetAudioRate = 0x9A
CEC_Opcode_InactiveSource = 0x9D
CEC_Opcode_CECVersion = 0x9E
CEC_Opcode_GetCECVersion = 0x9F
CEC_Opcode_VendorCommandWithID = 0xA0
CEC_Opcode_ClearExternalTimer = 0xA1
CEC_Opcode_SetExternalTimer = 0xA2
CEC_Opcode_ReportShortAudioDescriptor = 0xA3
CEC_Opcode_RequestShortAudioDescriptor = 0xA4
CEC_Opcode_InitARC = 0xC0
CEC_Opcode_ReportARCInited = 0xC1
CEC_Opcode_ReportARCTerminated = 0xC2
CEC_Opcode_RequestARCInit = 0xC3
CEC_Opcode_RequestARCTermination = 0xC4
CEC_Opcode_TerminateARC = 0xC5
CEC_Opcode_CDC = 0xF8
CEC_Opcode_Abort = 0xFF
CEC_ABORT_REASON_T = ct.c_uint32 # enum
(
CEC_Abort_Reason_Unrecognised_Opcode,
CEC_Abort_Reason_Wrong_Mode,
CEC_Abort_Reason_Cannot_Provide_Source,
CEC_Abort_Reason_Invalid_Operand,
CEC_Abort_Reason_Refused,
CEC_Abort_Reason_Undetermined,
) = range(6)
CEC_DISPLAY_CONTROL_T = ct.c_uint32 # enum
CEC_DISPLAY_CONTROL_DEFAULT_TIME = 0
CEC_DISPLAY_CONTROL_UNTIL_CLEARED = 1 << 6
CEC_DISPLAY_CONTROL_CLEAR_PREV_MSG = 1 << 7
CEC_POWER_STATUS_T = ct.c_uint32 # enum
(
CEC_POWER_STATUS_ON,
CEC_POWER_STATUS_STANDBY,
CEC_POWER_STATUS_ON_PENDING,
CEC_POWER_STATUS_STANDBY_PENDING,
) = range(4)
CEC_MENU_STATE_T = ct.c_uint32 # enum
(
CEC_MENU_STATE_ACTIVATED,
CEC_MENU_STATE_DEACTIVATED,
CEC_MENU_STATE_QUERY,
) = range(3)
CEC_DECK_INFO_T = ct.c_uint32 # enum
(
CEC_DECK_INFO_PLAY,
CEC_DECK_INFO_RECORD,
CEC_DECK_INFO_PLAY_REVERSE,
CEC_DECK_INFO_STILL,
CEC_DECK_INFO_SLOW,
CEC_DECK_INFO_SLOW_REVERSE,
CEC_DECK_INFO_SEARCH_FORWARD,
CEC_DECK_INFO_SEARCH_REVERSE,
CEC_DECK_INFO_NO_MEDIA,
CEC_DECK_INFO_STOP,
CEC_DECK_INFO_WIND,
CEC_DECK_INFO_REWIND,
CEC_DECK_IDX_SEARCH_FORWARD,
CEC_DECK_IDX_SEARCH_REVERSE,
CEC_DECK_OTHER_STATUS,
) = range(0x11, 0x20)
CEC_DECK_CTRL_MODE_T = ct.c_uint32 # enum
(
CEC_DECK_CTRL_FORWARD,
CEC_DECK_CTRL_BACKWARD,
CEC_DECK_CTRL_STOP,
CEC_DECK_CTRL_EJECT,
) = range(1, 5)
CEC_PLAY_MODE_T = ct.c_uint32 # enum
CEC_PLAY_FORWARD = 0x24
CEC_PLAY_REVERSE = 0x20
CEC_PLAY_STILL = 0x25
CEC_PLAY_SCAN_FORWARD_MIN_SPEED = 0x05
CEC_PLAY_SCAN_FORWARD_MED_SPEED = 0x06
CEC_PLAY_SCAN_FORWARD_MAX_SPEED = 0x07
CEC_PLAY_SCAN_REVERSE_MIN_SPEED = 0x09
CEC_PLAY_SCAN_REVERSE_MED_SPEED = 0x0A
CEC_PLAY_SCAN_REVERSE_MAX_SPEED = 0x0B
CEC_PLAY_SLOW_FORWARD_MIN_SPEED = 0x15
CEC_PLAY_SLOW_FORWARD_MED_SPEED = 0x16
CEC_PLAY_SLOW_FORWARD_MAX_SPEED = 0x17
CEC_PLAY_SLOW_REVERSE_MIN_SPEED = 0x19
CEC_PLAY_SLOW_REVERSE_MED_SPEED = 0x1A
CEC_PLAY_SLOW_REVERSE_MAX_SPEED = 0x1B
CEC_DECK_STATUS_REQUEST_T = ct.c_uint32 # enum
(
CEC_DECK_STATUS_ON,
CEC_DECK_STATUS_OFF,
CEC_DECK_STATUS_ONCE,
) = range(1, 4)
CEC_USER_CONTROL_T = ct.c_uint32 # enum
CEC_User_Control_Select = 0x00
CEC_User_Control_Up = 0x01
CEC_User_Control_Down = 0x02
CEC_User_Control_Left = 0x03
CEC_User_Control_Right = 0x04
CEC_User_Control_RightUp = 0x05
CEC_User_Control_RightDown = 0x06
CEC_User_Control_LeftUp = 0x07
CEC_User_Control_LeftDown = 0x08
CEC_User_Control_RootMenu = 0x09
CEC_User_Control_SetupMenu = 0x0A
CEC_User_Control_ContentsMenu = 0x0B
CEC_User_Control_FavoriteMenu = 0x0C
CEC_User_Control_Exit = 0x0D
CEC_User_Control_Number0 = 0x20
CEC_User_Control_Number1 = 0x21
CEC_User_Control_Number2 = 0x22
CEC_User_Control_Number3 = 0x23
CEC_User_Control_Number4 = 0x24
CEC_User_Control_Number5 = 0x25
CEC_User_Control_Number6 = 0x26
CEC_User_Control_Number7 = 0x27
CEC_User_Control_Number8 = 0x28
CEC_User_Control_Number9 = 0x29
CEC_User_Control_Dot = 0x2A
CEC_User_Control_Enter = 0x2B
CEC_User_Control_Clear = 0x2C
CEC_User_Control_ChannelUp = 0x30
CEC_User_Control_ChannelDown = 0x31
CEC_User_Control_PreviousChannel = 0x32
CEC_User_Control_SoundSelect = 0x33
CEC_User_Control_InputSelect = 0x34
CEC_User_Control_DisplayInformation = 0x35
CEC_User_Control_Help = 0x36
CEC_User_Control_PageUp = 0x37
CEC_User_Control_PageDown = 0x38
CEC_User_Control_Power = 0x40
CEC_User_Control_VolumeUp = 0x41
CEC_User_Control_VolumeDown = 0x42
CEC_User_Control_Mute = 0x43
CEC_User_Control_Play = 0x44
CEC_User_Control_Stop = 0x45
CEC_User_Control_Pause = 0x46
CEC_User_Control_Record = 0x47
CEC_User_Control_Rewind = 0x48
CEC_User_Control_FastForward = 0x49
CEC_User_Control_Eject = 0x4A
CEC_User_Control_Forward = 0x4B
CEC_User_Control_Backward = 0x4C
CEC_User_Control_Angle = 0x50
CEC_User_Control_Subpicture = 0x51
CEC_User_Control_VideoOnDemand = 0x52
CEC_User_Control_EPG = 0x53
CEC_User_Control_TimerProgramming = 0x54
CEC_User_Control_InitialConfig = 0x55
CEC_User_Control_PlayFunction = 0x60
CEC_User_Control_PausePlayFunction = 0x61
CEC_User_Control_RecordFunction = 0x62
CEC_User_Control_PauseRecordFunction = 0x63
CEC_User_Control_StopFunction = 0x64
CEC_User_Control_MuteFunction = 0x65
CEC_User_Control_RestoreVolumeFunction = 0x66
CEC_User_Control_TuneFunction = 0x67
CEC_User_Control_SelectDiskFunction = 0x68
CEC_User_Control_SelectAVInputFunction = 0x69
CEC_User_Control_SelectAudioInputFunction = 0x6A
CEC_User_Control_F1Blue = 0x71
CEC_User_Control_F2Red = 0x72
CEC_User_Control_F3Green = 0x73
CEC_User_Control_F4Yellow = 0x74
CEC_User_Control_F5 = 0x75
class VC_CEC_TOPOLOGY_T(ct.Structure):
_fields_ = [
('active_mask', ct.c_uint16),
('num_devices', ct.c_uint16),
('device_attr', ct.c_uint32 * 16),
]
class VC_CEC_MESSAGE_T(ct.Structure):
_fields_ = [
('length', ct.c_uint32),
('initiator', CEC_AllDevices_T),
('follower', CEC_AllDevices_T),
('payload', ct.c_uint8 * (CEC_MAX_XMIT_LENGTH + 1)),
]
VC_CEC_NOTIFY_T = ct.c_uint32 # enum
VC_CEC_NOTIFY_NONE = 0
VC_CEC_TX = 1 << 0
VC_CEC_RX = 1 << 1
VC_CEC_BUTTON_PRESSED = 1 << 2
VC_CEC_BUTTON_RELEASE = 1 << 3
VC_CEC_REMOTE_PRESSED = 1 << 4
VC_CEC_REMOTE_RELEASE = 1 << 5
VC_CEC_LOGICAL_ADDR = 1 << 6
VC_CEC_TOPOLOGY = 1 << 7
VC_CEC_LOGICAL_ADDR_LOST = 1 << 15
CEC_CALLBACK_T = ct.CFUNCTYPE(
None,
ct.c_void_p, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32)
CEC_CB_REASON = lambda x: x & 0xFFFF
CEC_CB_MSG_LENGTH = lambda x: (x >> 16) & 0xFF
CEC_CB_RC = lambda x: (x >> 24) & 0xFF
CEC_CB_INITIATOR = lambda x: (x >> 4) & 0xF
CEC_CB_FOLLOWER = lambda x: x & 0xF
CEC_CB_OPCODE = lambda x: (x >> 8) & 0xFF
CEC_CB_OPERAND1 = lambda x: (x >> 16) & 0xFF
CEC_CB_OPERAND2 = lambda x: (x >> 24) & 0xFF
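# A minimal decoding sketch (the packed words below are assumed sample values,
# not taken from the library): a callback receives reason/length in one
# parameter word and the CEC header/opcode in another, which the helpers
# above unpack.
def _cec_cb_decode_example():
    reason = 0x00030002                   # assumed: length 3, reason VC_CEC_RX
    assert CEC_CB_REASON(reason) == VC_CEC_RX
    assert CEC_CB_MSG_LENGTH(reason) == 3
    assert CEC_CB_RC(reason) == 0         # return code lives in the top byte
    param1 = 0x00008340                   # assumed: opcode 0x83, initiator 4
    assert CEC_CB_INITIATOR(param1) == 4
    assert CEC_CB_FOLLOWER(param1) == 0
    assert CEC_CB_OPCODE(param1) == 0x83  # <Give Physical Address>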
VC_CEC_ERROR_T = ct.c_uint32 # enum
(
VC_CEC_SUCCESS,
VC_CEC_ERROR_NO_ACK,
VC_CEC_ERROR_SHUTDOWN,
VC_CEC_ERROR_BUSY,
VC_CEC_ERROR_NO_LA,
VC_CEC_ERROR_NO_PA,
VC_CEC_ERROR_NO_TOPO,
VC_CEC_ERROR_INVALID_FOLLOWER,
VC_CEC_ERROR_INVALID_ARGUMENT,
) = range(9)
# vc_cecservice.h ############################################################
CECSERVICE_CALLBACK_T = ct.CFUNCTYPE(
None,
ct.c_void_p, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32)
vc_vchi_cec_init = _lib.vc_vchi_cec_init
vc_vchi_cec_init.argtypes = [VCHI_INSTANCE_T, ct.POINTER(ct.POINTER(VCHI_CONNECTION_T)), ct.c_uint32]
vc_vchi_cec_init.restype = None
vc_vchi_cec_stop = _lib.vc_vchi_cec_stop
vc_vchi_cec_stop.argtypes = []
vc_vchi_cec_stop.restype = None
vc_cec_register_callback = _lib.vc_cec_register_callback
vc_cec_register_callback.argtypes = [CECSERVICE_CALLBACK_T, ct.c_void_p]
vc_cec_register_callback.restype = None
vc_cec_register_command = _lib.vc_cec_register_command
vc_cec_register_command.argtypes = [CEC_OPCODE_T]
vc_cec_register_command.restype = ct.c_int
vc_cec_register_all = _lib.vc_cec_register_all
vc_cec_register_all.argtypes = []
vc_cec_register_all.restype = ct.c_int
vc_cec_deregister_command = _lib.vc_cec_deregister_command
vc_cec_deregister_command.argtypes = [CEC_OPCODE_T]
vc_cec_deregister_command.restype = ct.c_int
vc_cec_deregister_all = _lib.vc_cec_deregister_all
vc_cec_deregister_all.argtypes = []
vc_cec_deregister_all.restype = ct.c_int
vc_cec_send_message = _lib.vc_cec_send_message
vc_cec_send_message.argtypes = [ct.c_uint32, ct.POINTER(ct.c_uint8), ct.c_uint32, vcos_bool_t]
vc_cec_send_message.restype = ct.c_int
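# A hedged usage sketch (assumes vc_vchi_cec_init() has run and a logical
# address is held; the function name is illustrative): transmit the one-byte
# <Standby> opcode, 0x36, to the TV at logical address 0.
def _cec_standby_example():
    payload = (ct.c_uint8 * 1)(0x36)      # CEC <Standby> opcode
    return vc_cec_send_message(0, payload, 1, False)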
vc_cec_get_logical_address = _lib.vc_cec_get_logical_address
vc_cec_get_logical_address.argtypes = [ct.POINTER(CEC_AllDevices_T)]
vc_cec_get_logical_address.restype = ct.c_int
vc_cec_alloc_logical_address = _lib.vc_cec_alloc_logical_address
vc_cec_alloc_logical_address.argtypes = []
vc_cec_alloc_logical_address.restype = ct.c_int
vc_cec_release_logical_address = _lib.vc_cec_release_logical_address
vc_cec_release_logical_address.argtypes = []
vc_cec_release_logical_address.restype = ct.c_int
vc_cec_get_topology = _lib.vc_cec_get_topology
vc_cec_get_topology.argtypes = [ct.POINTER(VC_CEC_TOPOLOGY_T)]
vc_cec_get_topology.restype = ct.c_int
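# A hedged sketch of querying the bus topology through the binding above
# (again assuming the CEC service has been initialised):
def _cec_topology_example():
    topology = VC_CEC_TOPOLOGY_T()
    if vc_cec_get_topology(ct.byref(topology)) == 0:  # 0 == VC_CEC_SUCCESS
        return topology.num_devices
    return None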
vc_cec_set_vendor_id = _lib.vc_cec_set_vendor_id
vc_cec_set_vendor_id.argtypes = [ct.c_uint32]
vc_cec_set_vendor_id.restype = ct.c_int
vc_cec_set_osd_name = _lib.vc_cec_set_osd_name
vc_cec_set_osd_name.argtypes = [ct.c_char_p]
vc_cec_set_osd_name.restype = ct.c_int
vc_cec_get_physical_address = _lib.vc_cec_get_physical_address
vc_cec_get_physical_address.argtypes = [ct.POINTER(ct.c_uint16)]
vc_cec_get_physical_address.restype = ct.c_int
vc_cec_get_vendor_id = _lib.vc_cec_get_vendor_id
vc_cec_get_vendor_id.argtypes = [CEC_AllDevices_T, ct.POINTER(ct.c_uint32)]
vc_cec_get_vendor_id.restype = ct.c_int
vc_cec_device_type = _lib.vc_cec_device_type
vc_cec_device_type.argtypes = [CEC_AllDevices_T]
vc_cec_device_type.restype = CEC_DEVICE_TYPE_T
vc_cec_send_message2 = _lib.vc_cec_send_message2
vc_cec_send_message2.argtypes = [ct.POINTER(VC_CEC_MESSAGE_T)]
vc_cec_send_message2.restype = ct.c_int
vc_cec_param2message = _lib.vc_cec_param2message
vc_cec_param2message.argtypes = [ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.c_uint32, ct.POINTER(VC_CEC_MESSAGE_T)]
vc_cec_param2message.restype = ct.c_int
vc_cec_poll_address = _lib.vc_cec_poll_address
vc_cec_poll_address.argtypes = [CEC_AllDevices_T]
vc_cec_poll_address.restype = ct.c_int
vc_cec_set_logical_address = _lib.vc_cec_set_logical_address
vc_cec_set_logical_address.argtypes = [CEC_AllDevices_T, CEC_DEVICE_TYPE_T, ct.c_uint32]
vc_cec_set_logical_address.restype = ct.c_int
vc_cec_add_device = _lib.vc_cec_add_device
vc_cec_add_device.argtypes = [CEC_AllDevices_T, ct.c_uint16, CEC_DEVICE_TYPE_T, vcos_bool_t]
vc_cec_add_device.restype = ct.c_int
vc_cec_set_passive = _lib.vc_cec_set_passive
vc_cec_set_passive.argtypes = [vcos_bool_t]
vc_cec_set_passive.restype = ct.c_int
vc_cec_send_FeatureAbort = _lib.vc_cec_send_FeatureAbort
vc_cec_send_FeatureAbort.argtypes = [ct.c_uint32, CEC_OPCODE_T, CEC_ABORT_REASON_T]
vc_cec_send_FeatureAbort.restype = ct.c_int
vc_cec_send_ActiveSource = _lib.vc_cec_send_ActiveSource
vc_cec_send_ActiveSource.argtypes = [ct.c_uint16, vcos_bool_t]
vc_cec_send_ActiveSource.restype = ct.c_int
vc_cec_send_ImageViewOn = _lib.vc_cec_send_ImageViewOn
vc_cec_send_ImageViewOn.argtypes = [ct.c_uint32, vcos_bool_t]
vc_cec_send_ImageViewOn.restype = ct.c_int
vc_cec_send_SetOSDString = _lib.vc_cec_send_SetOSDString
vc_cec_send_SetOSDString.argtypes = [ct.c_uint32, CEC_DISPLAY_CONTROL_T, ct.c_char_p, vcos_bool_t]
vc_cec_send_SetOSDString.restype = ct.c_int
vc_cec_send_Standby = _lib.vc_cec_send_Standby
vc_cec_send_Standby.argtypes = [ct.c_uint32, vcos_bool_t]
vc_cec_send_Standby.restype = ct.c_int
vc_cec_send_MenuStatus = _lib.vc_cec_send_MenuStatus
vc_cec_send_MenuStatus.argtypes = [ct.c_uint32, CEC_MENU_STATE_T, vcos_bool_t]
vc_cec_send_MenuStatus.restype = ct.c_int
vc_cec_send_ReportPhysicalAddress = _lib.vc_cec_send_ReportPhysicalAddress
vc_cec_send_ReportPhysicalAddress.argtypes = [ct.c_uint16, CEC_DEVICE_TYPE_T, vcos_bool_t]
vc_cec_send_ReportPhysicalAddress.restype = ct.c_int
# vc_gencmd.h ################################################################
vc_gencmd_init = _lib.vc_gencmd_init
vc_gencmd_init.argtypes = []
vc_gencmd_init.restype = ct.c_int
vc_gencmd_stop = _lib.vc_gencmd_stop
vc_gencmd_stop.argtypes = []
vc_gencmd_stop.restype = None
vc_gencmd_send = _lib.vc_gencmd_send
vc_gencmd_send.argtypes = [ct.c_char_p]
vc_gencmd_send.restype = ct.c_int
vc_gencmd_read_response = _lib.vc_gencmd_read_response
vc_gencmd_read_response.argtypes = [ct.c_char_p, ct.c_int]
vc_gencmd_read_response.restype = ct.c_int
vc_gencmd = _lib.vc_gencmd
vc_gencmd.argtypes = [ct.c_char_p, ct.c_int, ct.c_char_p]
vc_gencmd.restype = ct.c_int
vc_gencmd_string_property = _lib.vc_gencmd_string_property
vc_gencmd_string_property.argtypes = [ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_char_p), ct.POINTER(ct.c_int)]
vc_gencmd_string_property.restype = ct.c_int
vc_gencmd_number_property = _lib.vc_gencmd_number_property
vc_gencmd_number_property.argtypes = [ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_int)]
vc_gencmd_number_property.restype = ct.c_int
vc_gencmd_until = _lib.vc_gencmd_until
vc_gencmd_until.argtypes = [ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_int]
vc_gencmd_until.restype = ct.c_int
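# A hedged sketch of the gencmd interface (assumes vc_gencmd_init() has been
# called; "measure_temp" is a documented firmware command):
def _gencmd_example():
    response = ct.create_string_buffer(1024)
    if vc_gencmd(response, ct.sizeof(response), b'measure_temp') == 0:
        return response.value.decode('ascii')         # e.g. "temp=42.0'C"
    return None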

4165
picamera/camera.py Normal file

File diff suppressed because it is too large

50
picamera/color.py Normal file
View file

@ -0,0 +1,50 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import colorzero as c0
from .exc import PiCameraDeprecated
NAMED_COLORS = c0.tables.NAMED_COLORS
Red = c0.Red
Green = c0.Green
Blue = c0.Blue
Hue = c0.Hue
Lightness = c0.Lightness
Saturation = c0.Saturation
class Color(c0.Color):
def __new__(cls, *args, **kwargs):
warnings.warn(
PiCameraDeprecated(
'The picamera.color module and Color class are deprecated; '
'please use the colorzero library (same API) instead'))
return c0.Color.__new__(cls, *args, **kwargs)
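# A minimal sketch of the shim above: the deprecated spelling still yields an
# ordinary colorzero value, merely raising a PiCameraDeprecated warning first.
def _color_shim_example():
    value = Color('red')                  # emits a PiCameraDeprecated warning
    assert value == c0.Color('red')
    return value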

320
picamera/display.py Normal file
View file

@ -0,0 +1,320 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import mimetypes
import ctypes as ct
from functools import reduce
from operator import or_
from . import bcm_host, mmalobj as mo, mmal
from .encoders import PiCookedOneImageEncoder, PiRawOneImageEncoder
from .exc import PiCameraRuntimeError, PiCameraValueError
class PiDisplay(object):
__slots__ = (
'_display',
'_info',
'_transform',
'_exif_tags',
)
_ROTATIONS = {
bcm_host.DISPMANX_NO_ROTATE: 0,
bcm_host.DISPMANX_ROTATE_90: 90,
bcm_host.DISPMANX_ROTATE_180: 180,
bcm_host.DISPMANX_ROTATE_270: 270,
}
_ROTATIONS_R = {v: k for k, v in _ROTATIONS.items()}
_ROTATIONS_MASK = reduce(or_, _ROTATIONS.keys(), 0)
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
def __init__(self, display_num=0):
bcm_host.bcm_host_init()
self._exif_tags = {}
self._display = bcm_host.vc_dispmanx_display_open(display_num)
self._transform = bcm_host.DISPMANX_NO_ROTATE
if not self._display:
raise PiCameraRuntimeError('unable to open display %d' % display_num)
self._info = bcm_host.DISPMANX_MODEINFO_T()
if bcm_host.vc_dispmanx_display_get_info(self._display, self._info):
raise PiCameraRuntimeError('unable to get display info')
def close(self):
bcm_host.vc_dispmanx_display_close(self._display)
self._display = None
@property
def closed(self):
return self._display is None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
return format
def _get_image_encoder(self, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture`. The *output_port* parameter
gives the MMAL port that the encoder should read output from. The
*format* parameter indicates the image format and will be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, None, output_port, format, resize, **options)
def capture(self, output, format=None, resize=None, **options):
format = self._get_image_format(output, format)
if format == 'yuv':
raise PiCameraValueError('YUV format is unsupported at this time')
res = self.resolution
if (self._info.transform & bcm_host.DISPMANX_ROTATE_90) or (
self._info.transform & bcm_host.DISPMANX_ROTATE_270):
res = res.transpose()
transform = self._transform
if (transform & bcm_host.DISPMANX_ROTATE_90) or (
transform & bcm_host.DISPMANX_ROTATE_270):
res = res.transpose()
source = mo.MMALPythonSource()
source.outputs[0].format = mmal.MMAL_ENCODING_RGB24
if format == 'bgr':
source.outputs[0].format = mmal.MMAL_ENCODING_BGR24
transform |= bcm_host.DISPMANX_SNAPSHOT_SWAP_RED_BLUE
source.outputs[0].framesize = res
source.outputs[0].commit()
encoder = self._get_image_encoder(
source.outputs[0], format, resize, **options)
try:
encoder.start(output)
try:
pitch = res.pad(width=16).width * 3
image_ptr = ct.c_uint32()
resource = bcm_host.vc_dispmanx_resource_create(
bcm_host.VC_IMAGE_RGB888, res.width, res.height, image_ptr)
if not resource:
raise PiCameraRuntimeError(
'unable to allocate resource for capture')
try:
buf = source.outputs[0].get_buffer()
if bcm_host.vc_dispmanx_snapshot(self._display, resource, transform):
raise PiCameraRuntimeError('failed to capture snapshot')
rect = bcm_host.VC_RECT_T(0, 0, res.width, res.height)
if bcm_host.vc_dispmanx_resource_read_data(resource, rect, buf._buf[0].data, pitch):
raise PiCameraRuntimeError('failed to read snapshot')
buf._buf[0].length = pitch * res.height
buf._buf[0].flags = (
mmal.MMAL_BUFFER_HEADER_FLAG_EOS |
mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END
)
finally:
bcm_host.vc_dispmanx_resource_delete(resource)
source.outputs[0].send_buffer(buf)
# XXX Anything more intelligent than a 10 second default?
encoder.wait(10)
finally:
encoder.stop()
finally:
encoder.close()
def _calculate_transform(self):
"""
Calculates a reverse transform to undo any that the boot configuration
applies (presumably the user has altered the boot configuration to
match their screen orientation so they want any capture to appear
correctly oriented by default). This is then modified by the transforms
specified in the :attr:`rotation`, :attr:`hflip` and :attr:`vflip`
attributes.
"""
r = PiDisplay._ROTATIONS[self._info.transform & PiDisplay._ROTATIONS_MASK]
r = (360 - r) % 360 # undo the native rotation
r = (r + self.rotation) % 360 # add selected rotation
result = PiDisplay._ROTATIONS_R[r]
result |= self._info.transform & ( # undo flips by re-doing them
bcm_host.DISPMANX_FLIP_HRIZ | bcm_host.DISPMANX_FLIP_VERT
)
return result
@property
def resolution(self):
"""
Retrieves the resolution of the display device.
"""
return mo.PiResolution(width=self._info.width, height=self._info.height)
def _get_hflip(self):
return bool(self._info.transform & bcm_host.DISPMANX_FLIP_HRIZ)
def _set_hflip(self, value):
if value:
self._info.transform |= bcm_host.DISPMANX_FLIP_HRIZ
else:
self._info.transform &= ~bcm_host.DISPMANX_FLIP_HRIZ
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether snapshots are horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the output of :meth:`capture` is horizontally flipped.
The default is ``False``.
.. note::
This property only affects snapshots; it does not affect the
display output itself.
""")
def _get_vflip(self):
return bool(self._info.transform & bcm_host.DISPMANX_FLIP_VERT)
def _set_vflip(self, value):
if value:
self._info.transform |= bcm_host.DISPMANX_FLIP_VERT
else:
self._info.transform &= ~bcm_host.DISPMANX_FLIP_VERT
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether snapshots are vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the output of :meth:`capture` is vertically flipped. The
default is ``False``.
.. note::
This property only affects snapshots; it does not affect the
display output itself.
""")
def _get_rotation(self):
return PiDisplay._ROTATIONS[self._transform & PiDisplay._ROTATIONS_MASK]
def _set_rotation(self, value):
try:
self._transform = (
self._transform & ~PiDisplay._ROTATIONS_MASK) | PiDisplay._ROTATIONS_R[value]
except KeyError:
raise PiCameraValueError('invalid rotation %d' % value)
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the rotation of snapshots.
When queried, the :attr:`rotation` property returns the rotation
applied to the result of :meth:`capture`. Valid values are 0, 90, 180,
and 270. When set, the property changes the rotation applied to the
result of :meth:`capture`. The default is 0.
.. note::
This property only affects snapshots; it does not affect the
display itself. To rotate the display itself, modify the
``display_rotate`` value in :file:`/boot/config.txt`.
""")
def _get_exif_tags(self):
return self._exif_tags
def _set_exif_tags(self, value):
self._exif_tags = {k: v for k, v in value.items()}
exif_tags = property(_get_exif_tags, _set_exif_tags)
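# A hedged usage sketch (the filename is illustrative): snapshot the current
# display output, with the format derived from the extension as described in
# _get_image_format() above.
def _display_capture_example():
    with PiDisplay() as display:
        display.rotation = 0
        display.capture('screenshot.jpg')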

1218
picamera/encoders.py Normal file

File diff suppressed because it is too large

185
picamera/exc.py Normal file
View file

@ -0,0 +1,185 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import picamera.mmal as mmal
class PiCameraWarning(Warning):
"""
Base class for PiCamera warnings.
"""
class PiCameraDeprecated(PiCameraWarning, DeprecationWarning):
"""
Raised when deprecated functionality in picamera is used.
"""
class PiCameraFallback(PiCameraWarning, RuntimeWarning):
"""
Raised when picamera has to fallback on old functionality.
"""
class PiCameraResizerEncoding(PiCameraWarning, RuntimeWarning):
"""
Raised when picamera uses a resizer purely for encoding purposes.
"""
class PiCameraAlphaStripping(PiCameraWarning, RuntimeWarning):
"""
Raised when picamera does alpha-byte stripping.
"""
class PiCameraResolutionRounded(PiCameraWarning, RuntimeWarning):
"""
Raised when picamera has to round a requested frame size upward.
"""
class PiCameraError(Exception):
"""
Base class for PiCamera errors.
"""
class PiCameraRuntimeError(PiCameraError, RuntimeError):
"""
Raised when an invalid sequence of operations is attempted with a
:class:`PiCamera` object.
"""
class PiCameraClosed(PiCameraRuntimeError):
"""
Raised when a method is called on a camera which has already been closed.
"""
class PiCameraNotRecording(PiCameraRuntimeError):
"""
Raised when :meth:`~PiCamera.stop_recording` or
:meth:`~PiCamera.split_recording` are called against a port which has no
recording active.
"""
class PiCameraAlreadyRecording(PiCameraRuntimeError):
"""
Raised when :meth:`~PiCamera.start_recording` or
:meth:`~PiCamera.record_sequence` are called against a port which already
has an active recording.
"""
class PiCameraValueError(PiCameraError, ValueError):
"""
Raised when an invalid value is fed to a :class:`~PiCamera` object.
"""
class PiCameraIOError(PiCameraError, IOError):
"""
Raised when a :class:`~PiCamera` object is unable to perform an IO
operation.
"""
class PiCameraMMALError(PiCameraError):
"""
Raised when an MMAL operation fails for whatever reason.
"""
def __init__(self, status, prefix=""):
self.status = status
PiCameraError.__init__(self, "%s%s%s" % (prefix, ": " if prefix else "", {
mmal.MMAL_ENOMEM: "Out of memory",
mmal.MMAL_ENOSPC: "Out of resources",
mmal.MMAL_EINVAL: "Argument is invalid",
mmal.MMAL_ENOSYS: "Function not implemented",
mmal.MMAL_ENOENT: "No such file or directory",
mmal.MMAL_ENXIO: "No such device or address",
mmal.MMAL_EIO: "I/O error",
mmal.MMAL_ESPIPE: "Illegal seek",
mmal.MMAL_ECORRUPT: "Data is corrupt #FIXME not POSIX",
mmal.MMAL_ENOTREADY: "Component is not ready #FIXME not POSIX",
mmal.MMAL_ECONFIG: "Component is not configured #FIXME not POSIX",
mmal.MMAL_EISCONN: "Port is already connected",
mmal.MMAL_ENOTCONN: "Port is disconnected",
mmal.MMAL_EAGAIN: "Resource temporarily unavailable; try again later",
mmal.MMAL_EFAULT: "Bad address",
}.get(status, "Unknown status error")))
class PiCameraPortDisabled(PiCameraMMALError):
"""
Raised when attempting a buffer operation on a disabled port.
This exception is intended for the common use-case of attempting to get
or send a buffer just when a component is shutting down (e.g. at script
teardown) and simplifies the trivial response (ignore the error and shut
down quietly). For example::
def _callback(self, port, buf):
try:
buf = self.outputs[0].get_buffer(False)
except PiCameraPortDisabled:
return True # shutting down
# ...
"""
def __init__(self, msg):
super(PiCameraPortDisabled, self).__init__(mmal.MMAL_EINVAL, msg)
def mmal_check(status, prefix=""):
"""
Checks the return status of an mmal call and raises an exception on
failure.
The *status* parameter is the result of an MMAL call. If *status* is
anything other than MMAL_SUCCESS, a :exc:`PiCameraMMALError` exception is
raised. The optional *prefix* parameter specifies a prefix message to place
at the start of the exception's message to provide some context.
"""
if status != mmal.MMAL_SUCCESS:
raise PiCameraMMALError(status, prefix)
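# A minimal usage sketch for mmal_check() (the port and callback arguments
# are hypothetical; mmal.mmal_port_enable is one of the raw ctypes bindings
# in picamera.mmal):
def _mmal_check_example(port, callback):
    mmal_check(
        mmal.mmal_port_enable(port, callback),
        prefix='unable to enable port')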

214
picamera/frames.py Normal file
View file

@ -0,0 +1,214 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str and range equivalent to Py3's
str = type('')
import warnings
from collections import namedtuple
from picamera.exc import (
mmal_check,
PiCameraError,
PiCameraMMALError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraDeprecated,
)
class PiVideoFrameType(object):
"""
This class simply defines constants used to represent the type of a frame
in :attr:`PiVideoFrame.frame_type`. Effectively it is a namespace for an
enum.
.. attribute:: frame
Indicates a predicted frame (P-frame). This is the most common frame
type.
.. attribute:: key_frame
Indicates an intra-frame (I-frame) also known as a key frame.
.. attribute:: sps_header
Indicates an inline SPS/PPS header (rather than picture data) which is
typically used as a split point.
.. attribute:: motion_data
Indicates the frame is inline motion vector data, rather than picture
data.
.. versionadded:: 1.5
"""
frame = 0
key_frame = 1
sps_header = 2
motion_data = 3
class PiVideoFrame(namedtuple('PiVideoFrame', (
'index', # the frame number, where the first frame is 0
'frame_type', # a constant indicating the frame type (see PiVideoFrameType)
'frame_size', # the size (in bytes) of the frame's data
'video_size', # the size (in bytes) of the video so far
'split_size', # the size (in bytes) of the video since the last split
'timestamp', # the presentation timestamp (PTS) of the frame
'complete', # whether the frame is complete or not
))):
"""
This class is a :func:`~collections.namedtuple` derivative used to store
information about a video frame. It is recommended that you access the
information stored by this class by attribute name rather than position
(for example: ``frame.index`` rather than ``frame[0]``).
.. attribute:: index
Returns the zero-based number of the frame. This is a monotonic counter
that is simply incremented every time the camera starts outputting a
new frame. As a consequence, this attribute cannot be used to detect
dropped frames. Nor does it necessarily represent actual frames; it
will be incremented for SPS headers and motion data buffers too.
.. attribute:: frame_type
Returns a constant indicating the kind of data that the frame contains
(see :class:`PiVideoFrameType`). Please note that certain frame types
contain no image data at all.
.. attribute:: frame_size
Returns the size in bytes of the current frame. If a frame is written
in multiple chunks, this value will increment while :attr:`index`
remains static. Query :attr:`complete` to determine whether the frame
has been completely output yet.
.. attribute:: video_size
Returns the size in bytes of the entire video up to this frame. Note
that this is unlikely to match the size of the actual file/stream
written so far. This is because a stream may utilize buffering which
will cause the actual amount written (e.g. to disk) to lag behind the
value reported by this attribute.
.. attribute:: split_size
Returns the size in bytes of the video recorded since the last call to
either :meth:`~PiCamera.start_recording` or
:meth:`~PiCamera.split_recording`. For the reasons explained above,
this may differ from the size of the actual file/stream written so far.
.. attribute:: timestamp
Returns the presentation timestamp (PTS) of the frame. This represents
the point in time that the Pi received the first line of the frame from
the camera.
The timestamp is measured in microseconds (millionths of a second).
When the camera's clock mode is ``'reset'`` (the default), the
timestamp is relative to the start of the video recording. When the
camera's :attr:`~PiCamera.clock_mode` is ``'raw'``, it is relative to
the last system reboot. See :attr:`~PiCamera.timestamp` for more
information.
.. warning::
Currently, the camera occasionally returns "time unknown" values in
this field. In this case, picamera will simply re-use the timestamp
of the previous frame (under the assumption that time never goes
backwards). This happens for SPS header "frames", for example.
.. attribute:: complete
Returns a bool indicating whether the current frame is complete or not.
If the frame is complete then :attr:`frame_size` will not increment
any further, and will reset for the next frame.
.. versionchanged:: 1.5
Deprecated :attr:`header` and :attr:`keyframe` attributes and added the
new :attr:`frame_type` attribute instead.
.. versionchanged:: 1.9
Added the :attr:`complete` attribute.
"""
__slots__ = () # workaround python issue #24931
@property
def position(self):
"""
Returns the zero-based position of the frame in the stream containing
it.
"""
return self.split_size - self.frame_size
@property
def keyframe(self):
"""
Returns a bool indicating whether the current frame is a keyframe (an
intra-frame, or I-frame in MPEG parlance).
.. deprecated:: 1.5
Please compare :attr:`frame_type` to
:attr:`PiVideoFrameType.key_frame` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiVideoFrame.keyframe is deprecated; please check '
'PiVideoFrame.frame_type for equality with '
'PiVideoFrameType.key_frame instead'))
return self.frame_type == PiVideoFrameType.key_frame
@property
def header(self):
"""
Contains a bool indicating whether the current frame is actually an
SPS/PPS header. Typically it is best to split an H.264 stream so that
it starts with an SPS/PPS header.
.. deprecated:: 1.5
Please compare :attr:`frame_type` to
:attr:`PiVideoFrameType.sps_header` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiVideoFrame.header is deprecated; please check '
'PiVideoFrame.frame_type for equality with '
'PiVideoFrameType.sps_header instead'))
return self.frame_type == PiVideoFrameType.sps_header
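# A hedged sketch of consuming this class during a recording (the `camera`
# argument is an illustrative PiCamera instance; its `frame` attribute
# returns the most recent PiVideoFrame):
def _frame_info_example(camera):
    frame = camera.frame
    if frame.complete and frame.frame_type == PiVideoFrameType.key_frame:
        print('keyframe #%d at %d us' % (frame.index, frame.timestamp))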

2481
picamera/mmal.py Normal file

File diff suppressed because it is too large

3736
picamera/mmalobj.py Normal file

File diff suppressed because it is too large

605
picamera/renderers.py Normal file
View file

@ -0,0 +1,605 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import ctypes as ct
from . import mmal, mmalobj as mo
from .exc import (
PiCameraRuntimeError,
PiCameraValueError,
mmal_check,
)
class PiRenderer(object):
"""
Wraps :class:`~mmalobj.MMALRenderer` for use by PiCamera.
The *parent* parameter specifies the :class:`PiCamera` instance that has
constructed this renderer. All other parameters set the initial values
of the correspondingly named attributes (e.g. the *layer* parameter
sets the initial value of the :attr:`layer` attribute, the *crop* parameter
sets the initial value of the :attr:`crop` attribute, etc).
This base class isn't directly used by :class:`PiCamera`, but the two
derivatives defined below, :class:`PiOverlayRenderer` and
:class:`PiPreviewRenderer`, are used to produce overlays and the camera
preview respectively.
.. versionchanged:: 1.14
Added *anamorphic* parameter
"""
def __init__(
self, parent, layer=0, alpha=255, fullscreen=True, window=None,
crop=None, rotation=0, vflip=False, hflip=False, anamorphic=False):
# Create and enable the renderer component
self._rotation = 0
self._vflip = False
self._hflip = False
self.renderer = mo.MMALRenderer()
try:
self.layer = layer
self.alpha = alpha
self.fullscreen = fullscreen
self.anamorphic = anamorphic
if window is not None:
self.window = window
if crop is not None:
self.crop = crop
self.rotation = rotation
self.vflip = vflip
self.hflip = hflip
self.renderer.enable()
except:
self.renderer.close()
raise
def close(self):
"""
Finalizes the renderer and deallocates all structures.
This method is called by the camera prior to destroying the renderer
(or more precisely, letting it go out of scope to permit the garbage
collector to destroy it at some future time).
"""
if self.renderer:
self.renderer.close()
self.renderer = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def _get_alpha(self):
return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].alpha
def _set_alpha(self, value):
try:
if not (0 <= value <= 255):
raise PiCameraValueError(
"Invalid alpha value: %d (valid range 0..255)" % value)
except TypeError:
raise PiCameraValueError("Invalid alpha value: %s" % value)
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
mp.set = mmal.MMAL_DISPLAY_SET_ALPHA
mp.alpha = value
self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp
alpha = property(_get_alpha, _set_alpha, doc="""\
Retrieves or sets the opacity of the renderer.
When queried, the :attr:`alpha` property returns a value between 0 and
255 indicating the opacity of the renderer, where 0 is completely
transparent and 255 is completely opaque. The default value is 255. The
property can be set while recordings or previews are in progress.
.. note::
If the renderer is being fed RGBA data (as in partially transparent
overlays), the alpha property will be ignored.
""")
def _get_layer(self):
return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].layer
def _set_layer(self, value):
try:
if not (0 <= value <= 255):
raise PiCameraValueError(
"Invalid layer value: %d (valid range 0..255)" % value)
except TypeError:
raise PiCameraValueError("Invalid layer value: %s" % value)
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
mp.set = mmal.MMAL_DISPLAY_SET_LAYER
mp.layer = value
self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp
layer = property(_get_layer, _set_layer, doc="""\
Retrieves or sets the layer of the renderer.
The :attr:`layer` property is an integer which controls the layer that
the renderer occupies. Higher valued layers obscure lower valued layers
(with 0 being the "bottom" layer). The default value is 2. The property
can be set while recordings or previews are in progress.
""")
def _get_fullscreen(self):
return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].fullscreen.value != mmal.MMAL_FALSE
def _set_fullscreen(self, value):
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
mp.set = mmal.MMAL_DISPLAY_SET_FULLSCREEN
mp.fullscreen = bool(value)
self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp
fullscreen = property(_get_fullscreen, _set_fullscreen, doc="""\
Retrieves or sets whether the renderer appears full-screen.
The :attr:`fullscreen` property is a bool which controls whether the
renderer takes up the entire display or not. When set to ``False``, the
:attr:`window` property can be used to control the precise size of the
renderer display. The property can be set while recordings or previews
are active.
""")
def _get_anamorphic(self):
return self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION].noaspect.value != mmal.MMAL_FALSE
def _set_anamorphic(self, value):
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
mp.set = mmal.MMAL_DISPLAY_SET_NOASPECT
mp.noaspect = bool(value)
self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp
anamorphic = property(_get_anamorphic, _set_anamorphic, doc="""\
Retrieves or sets whether the renderer is `anamorphic`_.
The :attr:`anamorphic` property is a bool which controls whether the
renderer respects the `aspect ratio`_ of the source. When ``False``
(the default) the source aspect ratio is respected. When set to
``True``, the aspect ratio of the source is anamorphed. This can help
with things like 16:9 widescreen composite outputs for previews without
having to change the camera's output ratio. The property can be set
while recordings or previews are active.
.. versionadded:: 1.14
.. _aspect ratio: https://en.wikipedia.org/wiki/Aspect_ratio_(image)
.. _anamorphic: https://en.wikipedia.org/wiki/Anamorphic_widescreen
""")
def _get_window(self):
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
return (
mp.dest_rect.x,
mp.dest_rect.y,
mp.dest_rect.width,
mp.dest_rect.height,
)
def _set_window(self, value):
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid window rectangle (x, y, w, h) tuple: %s" % value)
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
mp.set = mmal.MMAL_DISPLAY_SET_DEST_RECT
mp.dest_rect = mmal.MMAL_RECT_T(x, y, w, h)
self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp
window = property(_get_window, _set_window, doc="""\
Retrieves or sets the size of the renderer.
When the :attr:`fullscreen` property is set to ``False``, the
:attr:`window` property specifies the size and position of the renderer
on the display. The property is a 4-tuple consisting of ``(x, y, width,
height)``. The property can be set while recordings or previews are
active.
""")
def _get_crop(self):
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
return (
mp.src_rect.x,
mp.src_rect.y,
mp.src_rect.width,
mp.src_rect.height,
)
def _set_crop(self, value):
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid crop rectangle (x, y, w, h) tuple: %s" % value)
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
mp.set = mmal.MMAL_DISPLAY_SET_SRC_RECT
mp.src_rect = mmal.MMAL_RECT_T(x, y, w, h)
self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp
crop = property(_get_crop, _set_crop, doc="""\
Retrieves or sets the area to read from the source.
The :attr:`crop` property specifies the rectangular area that the
renderer will read from the source as a 4-tuple of ``(x, y, width,
height)``. The special value ``(0, 0, 0, 0)`` (which is also the
default) means to read the entire area of the source. The property can be
set while recordings or previews are active.
For example, if the camera's resolution is currently configured as
1280x720, setting this attribute to ``(160, 160, 640, 400)`` will
crop the preview to the center 640x400 pixels of the input. Note that
this property does not affect the size of the output rectangle,
which is controlled with :attr:`fullscreen` and :attr:`window`.
.. note::
This property only affects the renderer; it has no bearing on image
captures or recordings (unlike the :attr:`~PiCamera.zoom` property
of the :class:`PiCamera` class).
""")
def _get_rotation(self):
return self._rotation
def _set_rotation(self, value):
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
self._set_transform(
self._get_transform(value, self._vflip, self._hflip))
self._rotation = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the renderer.
When queried, the :attr:`rotation` property returns the rotation
applied to the renderer. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the renderer's
output. The property can be set while recordings or previews are
active. The default is 0.
.. note::
This property only affects the renderer; it has no bearing on image
captures or recordings (unlike the :attr:`~PiCamera.rotation`
property of the :class:`PiCamera` class).
""")
def _get_vflip(self):
return self._vflip
def _set_vflip(self, value):
value = bool(value)
self._set_transform(
self._get_transform(self._rotation, value, self._hflip))
self._vflip = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the renderer's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the renderer's output is vertically flipped. The
property can be set while recordings or previews are in progress. The
default is ``False``.
.. note::
This property only affects the renderer; it has no bearing on image
captures or recordings (unlike the :attr:`~PiCamera.vflip` property
of the :class:`PiCamera` class).
""")
def _get_hflip(self):
return self._hflip
def _set_hflip(self, value):
value = bool(value)
self._set_transform(
self._get_transform(self._rotation, self._vflip, value))
self._hflip = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the renderer's output is horizontally
flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the renderer's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default is ``False``.
.. note::
This property only affects the renderer; it has no bearing on image
captures or recordings (unlike the :attr:`~PiCamera.hflip` property
of the :class:`PiCamera` class).
""")
def _get_transform(self, rotate, vflip, hflip):
# Use a (horizontally) mirrored transform if one of vflip or hflip is
# set. If vflip is set, rotate by an extra 180 degrees to make up for
# the lack of a "true" vertical flip
mirror = vflip ^ hflip
if vflip:
rotate = (rotate + 180) % 360
return {
(0, False): mmal.MMAL_DISPLAY_ROT0,
(90, False): mmal.MMAL_DISPLAY_ROT90,
(180, False): mmal.MMAL_DISPLAY_ROT180,
(270, False): mmal.MMAL_DISPLAY_ROT270,
(0, True): mmal.MMAL_DISPLAY_MIRROR_ROT0,
(90, True): mmal.MMAL_DISPLAY_MIRROR_ROT90,
(180, True): mmal.MMAL_DISPLAY_MIRROR_ROT180,
(270, True): mmal.MMAL_DISPLAY_MIRROR_ROT270,
}[(rotate, mirror)]
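    # A worked check of the mapping above (assumed inputs): rotation=90,
    # vflip=True, hflip=False gives mirror=True and rotate=(90+180)%360=270,
    # i.e. MMAL_DISPLAY_MIRROR_ROT270 -- a mirror plus an extra half-turn
    # standing in for the missing "true" vertical flip.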
def _set_transform(self, value):
mp = self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
mp.set = mmal.MMAL_DISPLAY_SET_TRANSFORM
mp.transform = value
self.renderer.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = mp
class PiOverlayRenderer(PiRenderer):
"""
Represents an :class:`~mmalobj.MMALRenderer` with a static source for
overlays.
This class descends from :class:`PiRenderer` and adds a static *source* for
the :class:`~mmalobj.MMALRenderer`. The *source* must be an object that
supports the :ref:`buffer protocol <bufferobjects>` in one of the supported
formats.
The optional *resolution* parameter specifies the size of the *source* as a
``(width, height)`` tuple. If this is omitted or ``None`` then the
resolution is assumed to be the same as the parent camera's current
:attr:`~PiCamera.resolution`. The optional *format* parameter specifies the
encoding of the *source*. This can be one of the unencoded formats:
``'yuv'``, ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. If omitted or
``None``, *format* will be guessed based on the size of *source* (assuming
3 bytes for `RGB`_, and 4 bytes for `RGBA`_).
The length of *source* must take into account that widths are rounded up to
the nearest multiple of 32, and heights to the nearest multiple of 16. For
example, if *resolution* is ``(1280, 720)``, and *format* is ``'rgb'`` then
*source* must be a buffer with length 1280 x 720 x 3 bytes, or 2,764,800
bytes (because 1280 is a multiple of 32 and 720 is a multiple of 16, no
extra rounding is required). However, if *resolution* is ``(97, 57)``, and
*format* is ``'rgb'`` then *source* must be a buffer with length 128 x 64 x
3 bytes, or 24,576 bytes (pixels beyond column 97 and row 57 in the source
will be ignored).
The *layer*, *alpha*, *fullscreen*, and *window* parameters are the same
as in :class:`PiRenderer`.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionchanged:: 1.13
Added *format* parameter
.. versionchanged:: 1.14
Added *anamorphic* parameter
"""
SOURCE_BPP = {
3: 'rgb',
4: 'rgba',
}
SOURCE_ENCODINGS = {
'yuv': mmal.MMAL_ENCODING_I420,
'rgb': mmal.MMAL_ENCODING_RGB24,
'rgba': mmal.MMAL_ENCODING_RGBA,
'bgr': mmal.MMAL_ENCODING_BGR24,
'bgra': mmal.MMAL_ENCODING_BGRA,
}
def __init__(
self, parent, source, resolution=None, format=None, layer=0,
alpha=255, fullscreen=True, window=None, crop=None, rotation=0,
vflip=False, hflip=False, anamorphic=False):
super(PiOverlayRenderer, self).__init__(
parent, layer, alpha, fullscreen, window, crop,
rotation, vflip, hflip, anamorphic)
# Copy format from camera's preview port, then adjust the encoding to
# RGB888 or RGBA and optionally adjust the resolution and size
if resolution is not None:
self.renderer.inputs[0].framesize = resolution
else:
self.renderer.inputs[0].framesize = parent.resolution
self.renderer.inputs[0].framerate = 0
if format is None:
source_len = mo.buffer_bytes(source)
plane_size = self.renderer.inputs[0].framesize.pad()
plane_len = plane_size.width * plane_size.height
try:
format = self.SOURCE_BPP[source_len // plane_len]
except KeyError:
raise PiCameraValueError(
'unable to determine format from source size')
try:
self.renderer.inputs[0].format = self.SOURCE_ENCODINGS[format]
except KeyError:
raise PiCameraValueError('unknown format %s' % format)
self.renderer.inputs[0].commit()
# The following callback is required to prevent the mmalobj layer
# automatically passing buffers back to the port
self.renderer.inputs[0].enable(callback=lambda port, buf: True)
self.update(source)
def update(self, source):
"""
Update the overlay with a new source of data.
The new *source* buffer must have the same size as the original buffer
used to create the overlay. There is currently no method for changing
the size of an existing overlay (remove and recreate the overlay if you
require this).
.. note::
If you repeatedly update an overlay renderer, you must make sure
that you do so at a rate equal to, or slower than, the camera's
framerate. Going faster will rapidly starve the renderer's pool of
buffers leading to a runtime error.
"""
buf = self.renderer.inputs[0].get_buffer()
buf.data = source
self.renderer.inputs[0].send_buffer(buf)
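# A hedged sketch of the padding rule described in the class docstring (the
# `camera` name is illustrative; PiCamera.add_overlay is the public
# constructor for this class):
def _overlay_example(camera):
    w, h = 97, 57
    pw, ph = (w + 31) // 32 * 32, (h + 15) // 16 * 16  # padded to 128 x 64
    source = bytearray(pw * ph * 3)                    # all-black RGB buffer
    return camera.add_overlay(source, size=(w, h), format='rgb', layer=3)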
class PiPreviewRenderer(PiRenderer):
"""
Represents an :class:`~mmalobj.MMALRenderer` which uses the camera's
preview as a source.
This class descends from :class:`PiRenderer` and adds an
:class:`~mmalobj.MMALConnection` to connect the renderer to an MMAL port.
The *source* parameter specifies the :class:`~mmalobj.MMALPort` to connect
to the renderer. The *resolution* parameter can be used to override the
framesize of the *source*. See :attr:`resolution` for details of when this
is useful.
All other parameters are the same as in :class:`PiRenderer`.
.. versionchanged:: 1.14
Added *anamorphic* parameter
"""
def __init__(
self, parent, source, resolution=None, layer=2, alpha=255,
fullscreen=True, window=None, crop=None, rotation=0, vflip=False,
hflip=False, anamorphic=False):
super(PiPreviewRenderer, self).__init__(
parent, layer, alpha, fullscreen, window, crop,
rotation, vflip, hflip, anamorphic)
self._parent = parent
if resolution is not None:
resolution = mo.to_resolution(resolution)
source.framesize = resolution
self.renderer.inputs[0].connect(source).enable()
def _get_resolution(self):
result = self._parent._camera.outputs[self._parent.CAMERA_PREVIEW_PORT].framesize
if result != self._parent.resolution:
return result
else:
return None
def _set_resolution(self, value):
if value is not None:
value = mo.to_resolution(value)
if (
value.width > self._parent.resolution.width or
value.height > self._parent.resolution.height
):
raise PiCameraValueError(
'preview resolution cannot exceed camera resolution')
self.renderer.connection.disable()
if value is None:
value = self._parent.resolution
self._parent._camera.outputs[self._parent.CAMERA_PREVIEW_PORT].framesize = value
self._parent._camera.outputs[self._parent.CAMERA_PREVIEW_PORT].commit()
self.renderer.connection.enable()
resolution = property(_get_resolution, _set_resolution, doc="""\
Retrieves or sets the resolution of the preview renderer.
By default, the preview's resolution matches the camera's resolution.
However, particularly high resolutions (such as the maximum resolution
of the V2 camera module) can cause issues. In this case, you may wish
to set a lower resolution for the preview than the camera's resolution.
When queried, the :attr:`resolution` property returns ``None`` if the
preview's resolution is derived from the camera's. In this case,
changing the camera's resolution will also cause the preview's
resolution to change. Otherwise, it returns the current preview
resolution as a tuple.
.. note::
The preview resolution cannot be greater than the camera's
resolution. If you set a preview resolution, then change the
camera's resolution below the preview's resolution, this property
will silently revert to ``None``, meaning the preview's resolution
will follow the camera's resolution.
When set, the property reconfigures the preview renderer with the new
resolution. As a special case, setting the property to ``None`` will
cause the preview to follow the camera's resolution once more. The
property can be set while recordings are in progress. The default is
``None``.
.. note::
This property only affects the renderer; it has no bearing on image
captures or recordings (unlike the :attr:`~PiCamera.resolution`
property of the :class:`PiCamera` class).
.. versionadded:: 1.11
""")
class PiNullSink(object):
"""
Implements an :class:`~mmalobj.MMALNullSink` which can be used in place of
a renderer.
The *parent* parameter specifies the :class:`PiCamera` instance which
constructed this :class:`~mmalobj.MMALNullSink`. The *source* parameter
specifies the :class:`~mmalobj.MMALPort` which the null-sink should connect
to its input.
The null-sink can act as a drop-in replacement for :class:`PiRenderer` in
most cases, but obviously doesn't implement attributes like ``alpha``,
``layer``, etc. as it simply dumps any incoming frames. This is also the
reason that this class doesn't derive from :class:`PiRenderer` like all
other classes in this module.
"""
def __init__(self, parent, source):
self.renderer = mo.MMALNullSink()
self.renderer.enable()
self.renderer.inputs[0].connect(source).enable()
def close(self):
"""
Finalizes the null-sink and deallocates all structures.
This method is called by the camera prior to destroying the null-sink
(or more precisely, letting it go out of scope to permit the garbage
collector to destroy it at some future time).
"""
if self.renderer:
self.renderer.close()
self.renderer = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()

833
picamera/streams.py Normal file
View file

@ -0,0 +1,833 @@
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
from threading import RLock
from collections import deque
from operator import attrgetter
from weakref import ref
from picamera.exc import PiCameraValueError
from picamera.frames import PiVideoFrame, PiVideoFrameType
class BufferIO(io.IOBase):
"""
    A stream which uses a :class:`memoryview` for storage.

    This is used internally by picamera for capturing directly to an existing
    object which supports the buffer protocol (like a numpy array). Because
    the underlying storage is fixed in size, the stream also has a fixed size
    and will raise an :exc:`IOError` exception if an attempt is made to write
    beyond the end of the buffer (though seek beyond the end is supported).

    Users should never need this class directly.
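
    A brief illustrative sketch of the behaviour::

        >>> buf = bytearray(5)
        >>> stream = BufferIO(buf)
        >>> stream.write(b'abc')
        3
        >>> buf
        bytearray(b'abc\\x00\\x00')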
"""
__slots__ = ('_buf', '_pos', '_size')
def __init__(self, obj):
self._buf = memoryview(obj)
if self._buf.ndim > 1 or self._buf.format != 'B':
try:
# Py2.7 doesn't have memoryview.cast
self._buf = self._buf.cast('B')
except AttributeError:
raise ValueError(
'buffer object must be one-dimensional and have unsigned '
'byte format ("B")')
self._pos = 0
self._size = self._buf.shape[0]
def close(self):
super(BufferIO, self).close()
try:
self._buf.release()
except AttributeError:
# Py2.7 doesn't have memoryview.release
pass
def _check_open(self):
if self.closed:
raise ValueError('I/O operation on a closed stream')
@property
def size(self):
"""
Return the maximum size of the buffer in bytes.
"""
return self._size
def readable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`read`.
"""
self._check_open()
return True
def writable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`write`.
"""
self._check_open()
return not self._buf.readonly
def seekable(self):
"""
Returns ``True``, indicating the stream supports :meth:`seek` and
:meth:`tell`.
"""
self._check_open()
return True
    def getvalue(self):
        """
        Return ``bytes`` containing the entire contents of the buffer.
        """
        # Note: unlike CircularIO, BufferIO has no lock attribute (the
        # storage is fixed in size) so the contents are returned directly
        return self._buf.tobytes()
def tell(self):
"""
Return the current buffer position.
"""
self._check_open()
return self._pos
def seek(self, offset, whence=io.SEEK_SET):
"""
        Change the buffer position to the given byte *offset*. *offset* is
        interpreted relative to the position indicated by *whence*. Values
        for *whence* are:

        * ``SEEK_SET`` or ``0``: start of the buffer (the default); *offset*
          should be zero or positive
        * ``SEEK_CUR`` or ``1``: current buffer position; *offset* may be
          negative
        * ``SEEK_END`` or ``2``: end of the buffer; *offset* is usually
          negative

        Return the new absolute position.
"""
self._check_open()
if whence == io.SEEK_CUR:
offset = self._pos + offset
elif whence == io.SEEK_END:
offset = self.size + offset
if offset < 0:
raise ValueError(
'New position is before the start of the stream')
self._pos = offset
return self._pos
def read(self, n=-1):
"""
Read up to *n* bytes from the buffer and return them. As a convenience,
if *n* is unspecified or -1, :meth:`readall` is called. Fewer than *n*
bytes may be returned if there are fewer than *n* bytes from the
current buffer position to the end of the buffer.
If 0 bytes are returned, and *n* was not 0, this indicates end of the
buffer.
"""
self._check_open()
if n < 0:
return self.readall()
elif n == 0:
return b''
else:
result = self._buf[self._pos:self._pos + n].tobytes()
self._pos += len(result)
return result
def readinto(self, b):
"""
Read bytes into a pre-allocated, writable bytes-like object b, and
return the number of bytes read.
"""
self._check_open()
result = max(0, min(len(b), self._size - self._pos))
if result == 0:
return 0
else:
b[:result] = self._buf[self._pos:self._pos + result]
return result
def readall(self):
"""
Read and return all bytes from the buffer until EOF.
"""
return self.read(max(0, self.size - self._pos))
def truncate(self, size=None):
"""
Raises :exc:`NotImplementedError` as the underlying buffer cannot be
resized.
"""
raise NotImplementedError('cannot resize a BufferIO stream')
def write(self, b):
"""
Write the given bytes or bytearray object, *b*, to the underlying
buffer and return the number of bytes written. If the underlying
buffer isn't large enough to contain all the bytes of *b*, as many
bytes as possible will be written before raising :exc:`IOError`.
"""
        self._check_open()
        if self._buf.readonly:
            raise IOError('buffer object is not writeable')
        excess = max(0, len(b) - (self.size - self._pos))
        result = len(b) - excess
        if excess:
            self._buf[self._pos:self._pos + result] = b[:-excess]
        else:
            self._buf[self._pos:self._pos + result] = b
        self._pos += result
        if excess:
            # Both this docstring and the class docstring promise IOError
            # when an attempt is made to write beyond the end of the
            # fixed-size buffer; raise it after the partial write
            raise IOError('write beyond the end of the buffer')
        return result
class CircularIO(io.IOBase):
"""
    A thread-safe stream which uses a ring buffer for storage.

    CircularIO provides an in-memory stream similar to the :class:`io.BytesIO`
    class. However, unlike :class:`io.BytesIO` its underlying storage is a
    `ring buffer`_ with a fixed maximum size. Once the maximum size is
    reached, writing effectively loops round to the beginning of the ring and
    starts overwriting the oldest content.

    Actually, this ring buffer is slightly different to "traditional" ring
    buffers. This ring buffer is optimized for camera usage which is expected
    to be read-light, write-heavy, and with writes *mostly* aligned to frame
    boundaries. Internally, the stream simply references each chunk written
    and drops references each time the overall size of the stream would
    exceed the specified limit.

    As a result the ring buffer doesn't stay strictly at its allocated limit
    as traditional ring buffers do. It also drops entire writes when the
    limit is reached (this is a desirable behaviour because it means that
    often whole frames are dropped from the start of the stream, rather than
    leaving partial frames at the start as in a traditional ring buffer). For
    example:

    .. code-block:: pycon

        >>> stream = CircularIO(size=10)
        >>> stream.write(b'abc')
        3
        >>> stream.write(b'def')
        3
        >>> stream.getvalue()
        b'abcdef'
        >>> stream.write(b'ghijk')
        5
        >>> stream.getvalue()
        b'defghijk'

    In a traditional ring buffer, one would expect the last ``getvalue()``
    call to return ``'bcdefghijk'`` as only the first character would be lost
    at the limit of 10 bytes. However, this ring buffer has dropped the
    entire write of ``'abc'``.

    The *size* parameter specifies the maximum size of the stream in bytes.
    The :meth:`read`, :meth:`tell`, and :meth:`seek` methods all operate
    equivalently to those in :class:`io.BytesIO` whilst :meth:`write` only
    differs in the wrapping behaviour described above. A :meth:`read1` method
    is also provided for efficient reading of the underlying ring buffer in
    write-sized chunks (or less).

    A re-entrant threading lock guards all operations, and is accessible for
    external use via the :attr:`lock` attribute.
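
    For example, a sketch of taking an atomic snapshot of the stream's
    content from another thread while writes may be on-going (``stream``
    standing in for an instance of this class)::

        with stream.lock:
            pos = stream.tell()
            try:
                stream.seek(0)
                snapshot = stream.read()
            finally:
                stream.seek(pos)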

    The performance of the class is geared toward faster writing than reading
    on the assumption that writing will be the common operation and reading
    the rare operation (a reasonable assumption for the camera use-case, but
    not necessarily for more general usage).

    .. _ring buffer: https://en.wikipedia.org/wiki/Circular_buffer
"""
def __init__(self, size):
if size < 1:
raise ValueError('size must be a positive integer')
self._lock = RLock()
self._data = deque()
self._size = size
self._length = 0
self._pos = 0
self._pos_index = 0
self._pos_offset = 0
def _check_open(self):
if self.closed:
raise ValueError('I/O operation on a closed stream')
@property
def lock(self):
"""
A re-entrant threading lock which is used to guard all operations.
"""
return self._lock
@property
def size(self):
"""
Return the maximum size of the buffer in bytes.
"""
return self._size
def readable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`read`.
"""
self._check_open()
return True
def writable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`write`.
"""
self._check_open()
return True
def seekable(self):
"""
Returns ``True``, indicating the stream supports :meth:`seek` and
:meth:`tell`.
"""
self._check_open()
return True
def getvalue(self):
"""
Return ``bytes`` containing the entire contents of the buffer.
"""
with self.lock:
return b''.join(self._data)
    def _set_pos(self, value):
        # Translate the absolute stream position *value* into the
        # (chunk index, offset within chunk) pair maintained alongside it
        self._pos = value
        self._pos_index = -1
        self._pos_offset = chunk_pos = 0
        for self._pos_index, chunk in enumerate(self._data):
            if chunk_pos + len(chunk) > value:
                self._pos_offset = value - chunk_pos
                return
            else:
                chunk_pos += len(chunk)
        # The position lies at or beyond the end of the stream; leave the
        # index pointing one past the final chunk
        self._pos_index += 1
        self._pos_offset = value - chunk_pos
def tell(self):
"""
Return the current stream position.
"""
self._check_open()
with self.lock:
return self._pos
def seek(self, offset, whence=io.SEEK_SET):
"""
        Change the stream position to the given byte *offset*. *offset* is
        interpreted relative to the position indicated by *whence*. Values
        for *whence* are:

        * ``SEEK_SET`` or ``0``: start of the stream (the default); *offset*
          should be zero or positive
        * ``SEEK_CUR`` or ``1``: current stream position; *offset* may be
          negative
        * ``SEEK_END`` or ``2``: end of the stream; *offset* is usually
          negative

        Return the new absolute position.
"""
self._check_open()
with self.lock:
if whence == io.SEEK_CUR:
offset = self._pos + offset
elif whence == io.SEEK_END:
offset = self._length + offset
if offset < 0:
raise ValueError(
'New position is before the start of the stream')
self._set_pos(offset)
return self._pos
def read(self, n=-1):
"""
Read up to *n* bytes from the stream and return them. As a convenience,
if *n* is unspecified or -1, :meth:`readall` is called. Fewer than *n*
bytes may be returned if there are fewer than *n* bytes from the
current stream position to the end of the stream.
If 0 bytes are returned, and *n* was not 0, this indicates end of the
stream.
"""
self._check_open()
if n < 0:
return self.readall()
elif n == 0:
return b''
else:
with self.lock:
if self._pos >= self._length:
return b''
from_index, from_offset = self._pos_index, self._pos_offset
self._set_pos(self._pos + n)
result = self._data[from_index][from_offset:from_offset + n]
# Bah ... can't slice a deque
for i in range(from_index + 1, self._pos_index):
result += self._data[i]
if from_index < self._pos_index < len(self._data):
result += self._data[self._pos_index][:self._pos_offset]
return result
def readall(self):
"""
Read and return all bytes from the stream until EOF, using multiple
calls to the stream if necessary.
"""
return self.read(max(0, self._length - self._pos))
def read1(self, n=-1):
"""
Read up to *n* bytes from the stream using only a single call to the
underlying object.
In the case of :class:`CircularIO` this roughly corresponds to
returning the content from the current position up to the end of the
write that added that content to the stream (assuming no subsequent
writes overwrote the content). :meth:`read1` is particularly useful
for efficient copying of the stream's content.
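
        For example, a sketch of draining the stream to *output* in
        write-sized chunks (*output* standing in for any object with a
        ``write`` method)::

            stream.seek(0)
            while True:
                buf = stream.read1()
                if not buf:
                    break
                output.write(buf)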
"""
self._check_open()
with self.lock:
if self._pos == self._length:
return b''
chunk = self._data[self._pos_index]
if n == -1:
n = len(chunk) - self._pos_offset
result = chunk[self._pos_offset:self._pos_offset + n]
self._pos += len(result)
self._pos_offset += n
if self._pos_offset >= len(chunk):
self._pos_index += 1
self._pos_offset = 0
return result
def truncate(self, size=None):
"""
        Resize the stream to the given *size* in bytes (or the current
        position if *size* is not specified). This resizing can extend or
        reduce the current stream size. In case of extension, the contents
        of the new stream area will be NUL (``\\x00``) bytes. The new stream
        size is returned.

        The current stream position isn't changed unless the resizing is
        expanding the stream, in which case it may be set to the maximum
        stream size if the expansion causes the ring buffer to loop around.
"""
self._check_open()
with self.lock:
if size is None:
size = self._pos
if size < 0:
raise ValueError('size must be zero, or a positive integer')
if size > self._length:
# Backfill the space between stream end and current position
# with NUL bytes
fill = b'\x00' * (size - self._length)
self._set_pos(self._length)
self.write(fill)
elif size < self._length:
# Lop off chunks until we get to the last one at the truncation
# point, and slice that one
save_pos = self._pos
self._set_pos(size)
while self._pos_index < len(self._data) - 1:
self._data.pop()
if self._pos_offset > 0:
self._data[self._pos_index] = self._data[self._pos_index][:self._pos_offset]
self._pos_index += 1
self._pos_offset = 0
else:
self._data.pop()
                self._length = size
                if self._pos != save_pos:
                    self._set_pos(save_pos)
            # The docstring states the new stream size is returned (matching
            # the io.IOBase.truncate interface)
            return size
def write(self, b):
"""
Write the given bytes or bytearray object, *b*, to the underlying
stream and return the number of bytes written.
"""
self._check_open()
b = bytes(b)
with self.lock:
# Special case: stream position is beyond the end of the stream.
# Call truncate to backfill space first
if self._pos > self._length:
self.truncate()
result = len(b)
if self._pos == self._length:
# Fast path: stream position is at the end of the stream so
# just append a new chunk
self._data.append(b)
self._length += len(b)
self._pos = self._length
self._pos_index = len(self._data)
self._pos_offset = 0
else:
# Slow path: stream position is somewhere in the middle;
# overwrite bytes in the current (and if necessary, subsequent)
# chunk(s), without extending them. If we reach the end of the
# stream, call ourselves recursively to continue down the fast
# path
while b and (self._pos < self._length):
chunk = self._data[self._pos_index]
head = b[:len(chunk) - self._pos_offset]
assert head
b = b[len(head):]
self._data[self._pos_index] = b''.join((
chunk[:self._pos_offset],
head,
chunk[self._pos_offset + len(head):]
))
self._pos += len(head)
if self._pos_offset + len(head) >= len(chunk):
self._pos_index += 1
self._pos_offset = 0
else:
self._pos_offset += len(head)
if b:
self.write(b)
# If the stream is now beyond the specified size limit, remove
# whole chunks until the size is within the limit again
while self._length > self._size:
chunk = self._data.popleft()
self._length -= len(chunk)
self._pos -= len(chunk)
self._pos_index -= 1
# no need to adjust self._pos_offset
return result
class PiCameraDequeHack(deque):
    # A deque which stores (chunk, frame meta-data) tuples internally whilst
    # presenting just the chunks through the ordinary deque interface; this
    # lets PiCameraCircularIO associate frame meta-data with each write
    def __init__(self, stream):
        super(PiCameraDequeHack, self).__init__()
        self.stream = ref(stream) # avoid a circular ref
def append(self, item):
# Include the frame's metadata.
frame = self.stream()._get_frame()
return super(PiCameraDequeHack, self).append((item, frame))
def pop(self):
return super(PiCameraDequeHack, self).pop()[0]
def popleft(self):
return super(PiCameraDequeHack, self).popleft()[0]
def __getitem__(self, index):
return super(PiCameraDequeHack, self).__getitem__(index)[0]
def __setitem__(self, index, value):
frame = super(PiCameraDequeHack, self).__getitem__(index)[1]
return super(PiCameraDequeHack, self).__setitem__(index, (value, frame))
def __iter__(self):
for item, frame in self.iter_both(False):
yield item
def __reversed__(self):
for item, frame in self.iter_both(True):
yield item
def iter_both(self, reverse):
if reverse:
return super(PiCameraDequeHack, self).__reversed__()
else:
return super(PiCameraDequeHack, self).__iter__()
class PiCameraDequeFrames(object):
    # An iterable view over the frame meta-data stored in PiCameraDequeHack;
    # exposed via the PiCameraCircularIO.frames property
    def __init__(self, stream):
        super(PiCameraDequeFrames, self).__init__()
        self.stream = ref(stream) # avoid a circular ref
def __iter__(self):
with self.stream().lock:
pos = 0
for item, frame in self.stream()._data.iter_both(False):
pos += len(item)
if frame:
# Rewrite the video_size and split_size attributes
# according to the current position of the chunk
frame = PiVideoFrame(
index=frame.index,
frame_type=frame.frame_type,
frame_size=frame.frame_size,
video_size=pos,
split_size=pos,
timestamp=frame.timestamp,
complete=frame.complete,
)
# Only yield the frame meta-data if the start of the frame
# still exists in the stream
if pos - frame.frame_size >= 0:
yield frame
def __reversed__(self):
with self.stream().lock:
pos = self.stream()._length
for item, frame in self.stream()._data.iter_both(True):
if frame:
frame = PiVideoFrame(
index=frame.index,
frame_type=frame.frame_type,
frame_size=frame.frame_size,
video_size=pos,
split_size=pos,
timestamp=frame.timestamp,
complete=frame.complete,
)
if pos - frame.frame_size >= 0:
yield frame
pos -= len(item)
class PiCameraCircularIO(CircularIO):
"""
    A derivative of :class:`CircularIO` which tracks camera frames.

    PiCameraCircularIO provides an in-memory stream based on a ring buffer.
    It is a specialization of :class:`CircularIO` which associates video
    frame meta-data with the recorded stream, accessible from the
    :attr:`frames` property.

    .. warning::

        The class makes a couple of assumptions which will cause the frame
        meta-data tracking to break if they are not adhered to:

        * the stream is only ever appended to - no writes ever start from
          the middle of the stream
        * the stream is never truncated (from the right; being ring buffer
          based, left truncation will occur automatically); the exception
          to this is the :meth:`clear` method.

    The *camera* parameter specifies the :class:`PiCamera` instance that will
    be recording video to the stream. If specified, the *size* parameter
    determines the maximum size of the stream in bytes. If *size* is not
    specified (or ``None``), then *seconds* must be specified instead. This
    provides the maximum length of the stream in seconds, assuming a data
    rate in bits-per-second given by the *bitrate* parameter (which defaults
    to ``17000000``, or 17Mbps, which is also the default bitrate used for
    video recording by :class:`PiCamera`). You cannot specify both *size*
    and *seconds*.
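
    For instance, at the default bitrate, ``seconds=20`` implies a ring
    buffer of 17000000 * 20 // 8 = 42,500,000 bytes (a little over 40MB).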

    The *splitter_port* parameter specifies the port of the built-in splitter
    that the video encoder will be attached to. This defaults to ``1`` and
    most users will have no need to specify anything different. If you do
    specify something else, ensure it is equal to the *splitter_port*
    parameter of the corresponding call to :meth:`~PiCamera.start_recording`.
    For example::

        import picamera

        with picamera.PiCamera() as camera:
            with picamera.PiCameraCircularIO(camera, splitter_port=2) as stream:
                camera.start_recording(stream, format='h264', splitter_port=2)
                camera.wait_recording(10, splitter_port=2)
                camera.stop_recording(splitter_port=2)

    .. attribute:: frames

        Returns an iterator over the frame meta-data.

        As the camera records video to the stream, the class captures the
        meta-data associated with each frame (in the form of a
        :class:`PiVideoFrame` tuple), discarding meta-data for frames which
        are no longer fully stored within the underlying ring buffer. You
        can use the frame meta-data to locate, for example, the first
        keyframe present in the stream in order to determine an appropriate
        range to extract.
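
        For example, a sketch of locating the first SPS header currently
        held in the buffer (``stream`` standing in for an instance of this
        class)::

            for frame in stream.frames:
                if frame.frame_type == PiVideoFrameType.sps_header:
                    print('first SPS header at offset', frame.position)
                    break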
"""
def __init__(
self, camera, size=None, seconds=None, bitrate=17000000,
splitter_port=1):
if size is None and seconds is None:
raise PiCameraValueError('You must specify either size, or seconds')
if size is not None and seconds is not None:
raise PiCameraValueError('You cannot specify both size and seconds')
if seconds is not None:
size = bitrate * seconds // 8
super(PiCameraCircularIO, self).__init__(size)
try:
camera._encoders
except AttributeError:
raise PiCameraValueError('camera must be a valid PiCamera object')
self.camera = camera
self.splitter_port = splitter_port
self._data = PiCameraDequeHack(self)
self._frames = PiCameraDequeFrames(self)
def _get_frame(self):
"""
Return frame metadata from latest frame, when it is complete.
"""
encoder = self.camera._encoders[self.splitter_port]
return encoder.frame if encoder.frame.complete else None
@property
def frames(self):
"""
An iterable which contains the meta-data (:class:`PiVideoFrame`
objects) for all complete frames currently stored in the circular
buffer.
"""
return self._frames
def clear(self):
"""
Resets the stream to empty safely.
This method truncates the stream to empty, and clears the associated
frame meta-data too, ensuring that subsequent writes operate correctly
(see the warning in the :class:`PiCameraCircularIO` class
documentation).
"""
with self.lock:
self.seek(0)
self.truncate()
    def _find(self, field, criteria, first_frame):
        # Working backwards from the newest frame, locate a span of at least
        # *criteria* (measured on *field*) which starts at a frame of type
        # *first_frame* (or at any frame if *first_frame* is None)
        first = last = None
        attr = attrgetter(field)
for frame in reversed(self._frames):
if last is None:
last = frame
if first_frame in (None, frame.frame_type):
first = frame
if last is not None and attr(last) - attr(frame) >= criteria:
break
if last is not None and attr(last) - attr(frame) >= criteria:
break
return first, last
    def _find_all(self, first_frame):
        first = last = None
        # The newest frame in the stream (if any) is the last one to copy
        for frame in reversed(self._frames):
            last = frame
            break
for frame in self._frames:
if first_frame in (None, frame.frame_type):
first = frame
break
return first, last
def copy_to(
self, output, size=None, seconds=None, frames=None,
first_frame=PiVideoFrameType.sps_header):
"""
copy_to(output, size=None, seconds=None, frames=None, first_frame=PiVideoFrameType.sps_header)

        Copies content from the stream to *output*.

        By default, this method copies all complete frames from the circular
        stream to the filename or file-like object given by *output*.

        If *size* is specified then the copy will be limited to the whole
        number of frames that fit within the specified number of bytes. If
        *seconds* is specified, then the copy will be limited to that number
        of seconds worth of frames. If *frames* is specified then the copy
        will be limited to that number of frames. Only one of *size*,
        *seconds*, or *frames* can be specified. If none is specified, all
        frames are copied.

        If *first_frame* is specified, it defines the frame type of the first
        frame to be copied. By default this is
        :attr:`~PiVideoFrameType.sps_header` as this must usually be the
        first frame in an H264 stream. If *first_frame* is ``None``, no such
        limit will be applied.

        .. warning::

            Note that if a frame of the specified type (e.g. SPS header)
            cannot be found within the specified number of seconds, bytes, or
            frames, then this method will simply copy nothing (but no error
            will be raised).

        The stream's position is not affected by this method.
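
        For example, a sketch of saving the most recent 10 seconds of
        buffered footage (``'event.h264'`` being merely an illustrative
        filename)::

            stream.copy_to('event.h264', seconds=10)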
"""
if (size, seconds, frames).count(None) < 2:
raise PiCameraValueError(
'You can only specify one of size, seconds, or frames')
if isinstance(output, bytes):
output = output.decode('utf-8')
opened = isinstance(output, str)
if opened:
output = io.open(output, 'wb')
try:
with self.lock:
if size is not None:
first, last = self._find('video_size', size, first_frame)
elif seconds is not None:
seconds = int(seconds * 1000000)
first, last = self._find('timestamp', seconds, first_frame)
elif frames is not None:
first, last = self._find('index', frames, first_frame)
else:
first, last = self._find_all(first_frame)
# Copy chunk references into a holding buffer; this allows us
# to release the lock on the stream quickly (in case recording
# is on-going)
chunks = []
if first is not None and last is not None:
pos = 0
for buf, frame in self._data.iter_both(False):
if pos > last.position + last.frame_size:
break
elif pos >= first.position:
chunks.append(buf)
pos += len(buf)
# Perform the actual I/O, copying chunks to the output
for buf in chunks:
output.write(buf)
return first, last
finally:
if opened:
output.close()