Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/tuner-core.c            |  54
-rw-r--r--  drivers/media/v4l2-core/v4l2-async.c            | 313
-rw-r--r--  drivers/media/v4l2-core/v4l2-common.c           |  25
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c   |  19
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c            | 634
-rw-r--r--  drivers/media/v4l2-core/v4l2-dev.c              |  18
-rw-r--r--  drivers/media/v4l2-core/v4l2-device.c           |   5
-rw-r--r--  drivers/media/v4l2-core/v4l2-dv-timings.c       | 202
-rw-r--r--  drivers/media/v4l2-core/v4l2-event.c            |  38
-rw-r--r--  drivers/media/v4l2-core/v4l2-fh.c               |   2
-rw-r--r--  drivers/media/v4l2-core/v4l2-flash-led-class.c  |   2
-rw-r--r--  drivers/media/v4l2-core/v4l2-fwnode.c           | 845
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c            |  58
-rw-r--r--  drivers/media/v4l2-core/v4l2-mc.c               | 147
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c          |  67
-rw-r--r--  drivers/media/v4l2-core/v4l2-subdev.c           |  11
16 files changed, 1921 insertions(+), 519 deletions(-)
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 7f858c39753c..03a340cb5a9b 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -94,9 +94,56 @@ static const struct v4l2_subdev_ops tuner_ops;
} while (0)
/*
- * Internal struct used inside the driver
+ * Internal enums/struct used inside the driver
*/
+/**
+ * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER
+ *
+ * @TUNER_PAD_RF_INPUT:
+ * Radiofrequency (RF) sink pad, usually linked to an RF connector entity.
+ * @TUNER_PAD_OUTPUT:
+ * Tuner video output source pad. Contains the video chrominance
+ * and luminance or the whole bandwidth of the signal converted to
+ * an Intermediate Frequency (IF) or to baseband (on zero-IF tuners).
+ * @TUNER_PAD_AUD_OUT:
+ * Tuner audio output source pad. Tuners used to decode analog TV
+ * signals have an extra pad for audio output. Old tuners use an
+ * analog stage with a SAW filter for the audio IF frequency. The
+ * output of the pad is, in this case, the audio IF, which should be
+ * decoded either by the bridge chipset (as is the case with cx2388x
+ * chipsets) or by an external IF sound processor, such as the
+ * msp34xx. On modern silicon tuners, the audio IF decoder is usually
+ * incorporated in the tuner. In that case, the output of this pad
+ * is sampled audio data.
+ * @TUNER_NUM_PADS:
+ * Number of pads of the tuner.
+ */
+enum tuner_pad_index {
+ TUNER_PAD_RF_INPUT,
+ TUNER_PAD_OUTPUT,
+ TUNER_PAD_AUD_OUT,
+ TUNER_NUM_PADS
+};
+
+/**
+ * enum if_vid_dec_pad_index - video IF-PLL pad index
+ * for MEDIA_ENT_F_IF_VID_DECODER
+ *
+ * @IF_VID_DEC_PAD_IF_INPUT:
+ * video Intermediate Frequency (IF) sink pad
+ * @IF_VID_DEC_PAD_OUT:
+ * IF-PLL video output source pad. Contains the video chrominance
+ * and luminance IF signals.
+ * @IF_VID_DEC_PAD_NUM_PADS:
+ * Number of pads of the video IF-PLL.
+ */
+enum if_vid_dec_pad_index {
+ IF_VID_DEC_PAD_IF_INPUT,
+ IF_VID_DEC_PAD_OUT,
+ IF_VID_DEC_PAD_NUM_PADS
+};
+
struct tuner {
/* device */
struct dvb_frontend fe;
@@ -685,15 +732,20 @@ register_client:
*/
if (t->type == TUNER_TDA9887) {
t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG;
ret = media_entity_pads_init(&t->sd.entity,
IF_VID_DEC_PAD_NUM_PADS,
&t->pad[0]);
t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
} else {
t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG;
t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO;
ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
&t->pad[0]);
t->sd.entity.function = MEDIA_ENT_F_TUNER;
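As a rough sketch of the pattern the hunk above follows -- assuming the tuner_pad_index enum above is in scope, with function and variable names that are purely illustrative -- an analog tuner sub-device describes its pads and their signal types like this before registering the entity:

static int example_tuner_init_pads(struct v4l2_subdev *sd,
				   struct media_pad *pads)
{
	/* RF comes in; demodulated video and audio IF go out */
	pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
	pads[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
	pads[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
	pads[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG;
	pads[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
	pads[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO;

	sd->entity.function = MEDIA_ENT_F_TUNER;
	return media_entity_pads_init(&sd->entity, TUNER_NUM_PADS, pads);
}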
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 2b08d03b251d..a6d91370838d 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -57,6 +57,7 @@ static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
struct i2c_client *client = i2c_verify_client(sd->dev);
+
return client &&
asd->match.i2c.adapter_id == client->adapter->nr &&
asd->match.i2c.address == client->addr;
@@ -89,10 +90,11 @@ static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
-static struct v4l2_async_subdev *v4l2_async_find_match(
- struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd)
+static struct v4l2_async_subdev *
+v4l2_async_find_match(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
{
- bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
+ bool (*match)(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
struct v4l2_async_subdev *asd;
list_for_each_entry(asd, &notifier->waiting, list) {
@@ -124,9 +126,34 @@ static struct v4l2_async_subdev *v4l2_async_find_match(
return NULL;
}
+/* Compare two async sub-device descriptors for equivalence */
+static bool asd_equal(struct v4l2_async_subdev *asd_x,
+ struct v4l2_async_subdev *asd_y)
+{
+ if (asd_x->match_type != asd_y->match_type)
+ return false;
+
+ switch (asd_x->match_type) {
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ return strcmp(asd_x->match.device_name,
+ asd_y->match.device_name) == 0;
+ case V4L2_ASYNC_MATCH_I2C:
+ return asd_x->match.i2c.adapter_id ==
+ asd_y->match.i2c.adapter_id &&
+ asd_x->match.i2c.address ==
+ asd_y->match.i2c.address;
+ case V4L2_ASYNC_MATCH_FWNODE:
+ return asd_x->match.fwnode == asd_y->match.fwnode;
+ default:
+ break;
+ }
+
+ return false;
+}
+
/* Find the sub-device notifier registered by a sub-device driver. */
-static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(
- struct v4l2_subdev *sd)
+static struct v4l2_async_notifier *
+v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
struct v4l2_async_notifier *n;
@@ -138,8 +165,8 @@ static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(
}
/* Get v4l2_device related to the notifier if one can be found. */
-static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(
- struct v4l2_async_notifier *notifier)
+static struct v4l2_device *
+v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
while (notifier->parent)
notifier = notifier->parent;
@@ -150,8 +177,8 @@ static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(
/*
* Return true if all child sub-device notifiers are complete, false otherwise.
*/
-static bool v4l2_async_notifier_can_complete(
- struct v4l2_async_notifier *notifier)
+static bool
+v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd;
@@ -174,8 +201,8 @@ static bool v4l2_async_notifier_can_complete(
* Complete the master notifier if possible. This is done when all async
* sub-devices have been bound; v4l2_device is also available then.
*/
-static int v4l2_async_notifier_try_complete(
- struct v4l2_async_notifier *notifier)
+static int
+v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
/* Quick check whether there are still more sub-devices here. */
if (!list_empty(&notifier->waiting))
@@ -196,8 +223,8 @@ static int v4l2_async_notifier_try_complete(
return v4l2_async_notifier_call_complete(notifier);
}
-static int v4l2_async_notifier_try_all_subdevs(
- struct v4l2_async_notifier *notifier);
+static int
+v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
struct v4l2_device *v4l2_dev,
@@ -243,8 +270,8 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
}
/* Test all async sub-devices in a notifier for a match. */
-static int v4l2_async_notifier_try_all_subdevs(
- struct v4l2_async_notifier *notifier)
+static int
+v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_device *v4l2_dev =
v4l2_async_notifier_find_v4l2_dev(notifier);
@@ -281,14 +308,17 @@ again:
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
v4l2_device_unregister_subdev(sd);
- /* Subdevice driver will reprobe and put the subdev back onto the list */
+ /*
+ * Subdevice driver will reprobe and put the subdev back
+ * onto the list
+ */
list_del_init(&sd->async_list);
sd->asd = NULL;
}
/* Unbind all sub-devices in the notifier tree. */
-static void v4l2_async_notifier_unbind_all_subdevs(
- struct v4l2_async_notifier *notifier)
+static void
+v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd, *tmp;
@@ -308,29 +338,23 @@ static void v4l2_async_notifier_unbind_all_subdevs(
notifier->parent = NULL;
}
-/* See if an fwnode can be found in a notifier's lists. */
-static bool __v4l2_async_notifier_fwnode_has_async_subdev(
- struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode)
+/* See if an async sub-device can be found in a notifier's lists. */
+static bool
+__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_subdev *asd_y;
struct v4l2_subdev *sd;
- list_for_each_entry(asd, &notifier->waiting, list) {
- if (asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
- continue;
-
- if (asd->match.fwnode == fwnode)
+ list_for_each_entry(asd_y, &notifier->waiting, list)
+ if (asd_equal(asd, asd_y))
return true;
- }
list_for_each_entry(sd, &notifier->done, async_list) {
if (WARN_ON(!sd->asd))
continue;
- if (sd->asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
- continue;
-
- if (sd->asd->match.fwnode == fwnode)
+ if (asd_equal(asd, sd->asd))
return true;
}
@@ -338,76 +362,91 @@ static bool __v4l2_async_notifier_fwnode_has_async_subdev(
}
/*
- * Find out whether an async sub-device was set up for an fwnode already or
+ * Find out whether an async sub-device was set up already or
* whether it exists in a given notifier before @this_index.
+ * If @this_index < 0, search the notifier's entire @asd_list.
*/
-static bool v4l2_async_notifier_fwnode_has_async_subdev(
- struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode,
- unsigned int this_index)
+static bool
+v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd,
+ int this_index)
{
- unsigned int j;
+ struct v4l2_async_subdev *asd_y;
+ int j = 0;
lockdep_assert_held(&list_lock);
- /* Check that an fwnode is not being added more than once. */
- for (j = 0; j < this_index; j++) {
- struct v4l2_async_subdev *asd = notifier->subdevs[this_index];
- struct v4l2_async_subdev *other_asd = notifier->subdevs[j];
-
- if (other_asd->match_type == V4L2_ASYNC_MATCH_FWNODE &&
- asd->match.fwnode ==
- other_asd->match.fwnode)
+ /* Check that an asd is not being added more than once. */
+ list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
+ if (this_index >= 0 && j++ >= this_index)
+ break;
+ if (asd_equal(asd, asd_y))
return true;
}
- /* Check than an fwnode did not exist in other notifiers. */
+ /* Check that an asd does not exist in other notifiers. */
list_for_each_entry(notifier, &notifier_list, list)
- if (__v4l2_async_notifier_fwnode_has_async_subdev(
- notifier, fwnode))
+ if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
return true;
return false;
}
-static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd,
+ int this_index)
{
struct device *dev =
notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
- struct v4l2_async_subdev *asd;
- int ret;
- int i;
- if (notifier->num_subdevs > V4L2_MAX_SUBDEVS)
+ if (!asd)
+ return -EINVAL;
+
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_CUSTOM:
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ case V4L2_ASYNC_MATCH_I2C:
+ case V4L2_ASYNC_MATCH_FWNODE:
+ if (v4l2_async_notifier_has_async_subdev(notifier, asd,
+ this_index)) {
+ dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
+ return -EEXIST;
+ }
+ break;
+ default:
+ dev_err(dev, "Invalid match type %u on %p\n",
+ asd->match_type, asd);
return -EINVAL;
+ }
+
+ return 0;
+}
+
+void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
+
+ INIT_LIST_HEAD(&notifier->asd_list);
+
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_notifier_init);
+
+static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_subdev *asd;
+ int ret, i = 0;
INIT_LIST_HEAD(&notifier->waiting);
INIT_LIST_HEAD(&notifier->done);
mutex_lock(&list_lock);
- for (i = 0; i < notifier->num_subdevs; i++) {
- asd = notifier->subdevs[i];
-
- switch (asd->match_type) {
- case V4L2_ASYNC_MATCH_CUSTOM:
- case V4L2_ASYNC_MATCH_DEVNAME:
- case V4L2_ASYNC_MATCH_I2C:
- break;
- case V4L2_ASYNC_MATCH_FWNODE:
- if (v4l2_async_notifier_fwnode_has_async_subdev(
- notifier, asd->match.fwnode, i)) {
- dev_err(dev,
- "fwnode has already been registered or in notifier's subdev list\n");
- ret = -EEXIST;
- goto err_unlock;
- }
- break;
- default:
- dev_err(dev, "Invalid match type %u on %p\n",
- asd->match_type, asd);
- ret = -EINVAL;
+ list_for_each_entry(asd, &notifier->asd_list, asd_list) {
+ ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
+ if (ret)
goto err_unlock;
- }
+
list_add_tail(&asd->list, &notifier->waiting);
}
@@ -474,8 +513,8 @@ int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
-static void __v4l2_async_notifier_unregister(
- struct v4l2_async_notifier *notifier)
+static void
+__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
return;
@@ -498,36 +537,132 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);
-void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
- unsigned int i;
+ struct v4l2_async_subdev *asd, *tmp;
- if (!notifier || !notifier->max_subdevs)
+ if (!notifier)
return;
- for (i = 0; i < notifier->num_subdevs; i++) {
- struct v4l2_async_subdev *asd = notifier->subdevs[i];
-
+ list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
switch (asd->match_type) {
case V4L2_ASYNC_MATCH_FWNODE:
fwnode_handle_put(asd->match.fwnode);
break;
default:
- WARN_ON_ONCE(true);
break;
}
+ list_del(&asd->asd_list);
kfree(asd);
}
+}
- notifier->max_subdevs = 0;
- notifier->num_subdevs = 0;
+void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
+
+ __v4l2_async_notifier_cleanup(notifier);
- kvfree(notifier->subdevs);
- notifier->subdevs = NULL;
+ mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
+int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
+{
+ int ret;
+
+ mutex_lock(&list_lock);
+
+ ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
+ if (ret)
+ goto unlock;
+
+ list_add_tail(&asd->asd_list, &notifier->asd_list);
+
+unlock:
+ mutex_unlock(&list_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);
+
+struct v4l2_async_subdev *
+v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size)
+{
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ asd = kzalloc(asd_struct_size, GFP_KERNEL);
+ if (!asd)
+ return ERR_PTR(-ENOMEM);
+
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode = fwnode;
+
+ ret = v4l2_async_notifier_add_subdev(notifier, asd);
+ if (ret) {
+ kfree(asd);
+ return ERR_PTR(ret);
+ }
+
+ return asd;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);
+
+struct v4l2_async_subdev *
+v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
+ int adapter_id, unsigned short address,
+ unsigned int asd_struct_size)
+{
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ asd = kzalloc(asd_struct_size, GFP_KERNEL);
+ if (!asd)
+ return ERR_PTR(-ENOMEM);
+
+ asd->match_type = V4L2_ASYNC_MATCH_I2C;
+ asd->match.i2c.adapter_id = adapter_id;
+ asd->match.i2c.address = address;
+
+ ret = v4l2_async_notifier_add_subdev(notifier, asd);
+ if (ret) {
+ kfree(asd);
+ return ERR_PTR(ret);
+ }
+
+ return asd;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);
+
+struct v4l2_async_subdev *
+v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
+ const char *device_name,
+ unsigned int asd_struct_size)
+{
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ asd = kzalloc(asd_struct_size, GFP_KERNEL);
+ if (!asd)
+ return ERR_PTR(-ENOMEM);
+
+ asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
+ asd->match.device_name = device_name;
+
+ ret = v4l2_async_notifier_add_subdev(notifier, asd);
+ if (ret) {
+ kfree(asd);
+ return ERR_PTR(ret);
+ }
+
+ return asd;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);
+
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
struct v4l2_async_notifier *subdev_notifier;
@@ -601,7 +736,7 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
mutex_lock(&list_lock);
__v4l2_async_notifier_unregister(sd->subdev_notifier);
- v4l2_async_notifier_cleanup(sd->subdev_notifier);
+ __v4l2_async_notifier_cleanup(sd->subdev_notifier);
kfree(sd->subdev_notifier);
sd->subdev_notifier = NULL;
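A minimal sketch of how a bridge driver is expected to consume the new list-based notifier API added above; the bridge structure, its embedded v4l2_device/notifier and the notifier ops are hypothetical, only the v4l2_async_* and fwnode calls are real:

static int example_register_sensor(struct example_bridge *bridge,
				   struct fwnode_handle *endpoint)
{
	struct fwnode_handle *remote;
	struct v4l2_async_subdev *asd;
	int ret;

	v4l2_async_notifier_init(&bridge->notifier);
	bridge->notifier.ops = &example_notifier_ops;	/* bound/complete callbacks */

	remote = fwnode_graph_get_remote_port_parent(endpoint);
	if (!remote)
		return -EINVAL;

	/* On success the notifier owns the fwnode reference and puts it in cleanup */
	asd = v4l2_async_notifier_add_fwnode_subdev(&bridge->notifier, remote,
						    sizeof(*asd));
	if (IS_ERR(asd)) {
		fwnode_handle_put(remote);
		return PTR_ERR(asd);
	}

	ret = v4l2_async_notifier_register(&bridge->v4l2_dev, &bridge->notifier);
	if (ret)
		v4l2_async_notifier_cleanup(&bridge->notifier);

	return ret;
}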
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index b518b92d6d96..50763fb42a1b 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -100,7 +100,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _
qctrl->step = step;
qctrl->default_value = def;
qctrl->reserved[0] = qctrl->reserved[1] = 0;
- strlcpy(qctrl->name, name, sizeof(qctrl->name));
+ strscpy(qctrl->name, name, sizeof(qctrl->name));
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
@@ -109,6 +109,19 @@ EXPORT_SYMBOL(v4l2_ctrl_query_fill);
#if IS_ENABLED(CONFIG_I2C)
+void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client,
+ const char *devname, const char *postfix)
+{
+ if (!devname)
+ devname = client->dev.driver->name;
+ if (!postfix)
+ postfix = "";
+
+ snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix,
+ i2c_adapter_id(client->adapter), client->addr);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name);
+
void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
const struct v4l2_subdev_ops *ops)
{
@@ -120,10 +133,7 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
/* i2c_client and v4l2_subdev point to one another */
v4l2_set_subdevdata(sd, client);
i2c_set_clientdata(client, sd);
- /* initialize name */
- snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
- client->dev.driver->name, i2c_adapter_id(client->adapter),
- client->addr);
+ v4l2_i2c_subdev_set_name(sd, client, NULL, NULL);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
@@ -186,7 +196,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
/* Setup the i2c board info with the device type and
the device address. */
memset(&info, 0, sizeof(info));
- strlcpy(info.type, client_type, sizeof(info.type));
+ strscpy(info.type, client_type, sizeof(info.type));
info.addr = addr;
return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
@@ -255,7 +265,8 @@ void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
v4l2_set_subdevdata(sd, spi);
spi_set_drvdata(spi, sd);
/* initialize name */
- strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
+ snprintf(sd->name, sizeof(sd->name), "%s %s",
+ spi->dev.driver->name, dev_name(&spi->dev));
}
EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
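The new v4l2_i2c_subdev_set_name() helper lets a driver override the default "<driver> <adapter>-<addr>" subdev name, which mainly matters when one I2C client exposes several sub-devices. A hedged sketch, with a made-up state structure and ops:

static void example_name_subdevs(struct example_state *state,
				 struct i2c_client *client)
{
	v4l2_i2c_subdev_init(&state->core_sd, client, &example_core_ops);
	/* e.g. "example core 1-001a" instead of the default "example 1-001a" */
	v4l2_i2c_subdev_set_name(&state->core_sd, client, "example", " core");

	v4l2_i2c_subdev_init(&state->hdmi_sd, client, &example_hdmi_ops);
	v4l2_i2c_subdev_set_name(&state->hdmi_sd, client, "example", " hdmi");
}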
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 6481212fda77..f4325329fbd6 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -244,6 +244,7 @@ struct v4l2_format32 {
* return: number of created buffers
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -251,7 +252,8 @@ struct v4l2_create_buffers32 {
__u32 count;
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
- __u32 reserved[8];
+ __u32 capabilities;
+ __u32 reserved[7];
};
static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size)
@@ -411,6 +413,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers __user *p64,
if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
copy_in_user(p32, p64,
offsetof(struct v4l2_create_buffers32, format)) ||
+ assign_in_user(&p32->capabilities, &p64->capabilities) ||
copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return __put_v4l2_format32(&p64->format, &p32->format);
@@ -482,7 +485,7 @@ struct v4l2_buffer32 {
} m;
__u32 length;
__u32 reserved2;
- __u32 reserved;
+ __s32 request_fd;
};
static int get_v4l2_plane32(struct v4l2_plane __user *p64,
@@ -581,6 +584,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
{
u32 type;
u32 length;
+ s32 request_fd;
enum v4l2_memory memory;
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
@@ -595,7 +599,9 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
get_user(memory, &p32->memory) ||
put_user(memory, &p64->memory) ||
get_user(length, &p32->length) ||
- put_user(length, &p64->length))
+ put_user(length, &p64->length) ||
+ get_user(request_fd, &p32->request_fd) ||
+ put_user(request_fd, &p64->request_fd))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -699,7 +705,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64,
copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) ||
assign_in_user(&p32->sequence, &p64->sequence) ||
assign_in_user(&p32->reserved2, &p64->reserved2) ||
- assign_in_user(&p32->reserved, &p64->reserved) ||
+ assign_in_user(&p32->request_fd, &p64->request_fd) ||
get_user(length, &p64->length) ||
put_user(length, &p32->length))
return -EFAULT;
@@ -834,7 +840,8 @@ struct v4l2_ext_controls32 {
__u32 which;
__u32 count;
__u32 error_idx;
- __u32 reserved[2];
+ __s32 request_fd;
+ __u32 reserved[1];
compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
};
@@ -909,6 +916,7 @@ static int get_v4l2_ext_controls32(struct file *file,
get_user(count, &p32->count) ||
put_user(count, &p64->count) ||
assign_in_user(&p64->error_idx, &p32->error_idx) ||
+ assign_in_user(&p64->request_fd, &p32->request_fd) ||
copy_in_user(p64->reserved, p32->reserved, sizeof(p64->reserved)))
return -EFAULT;
@@ -974,6 +982,7 @@ static int put_v4l2_ext_controls32(struct file *file,
get_user(count, &p64->count) ||
put_user(count, &p32->count) ||
assign_in_user(&p32->error_idx, &p64->error_idx) ||
+ assign_in_user(&p32->request_fd, &p64->request_fd) ||
copy_in_user(p32->reserved, p64->reserved, sizeof(p32->reserved)) ||
get_user(kcontrols, &p64->controls))
return -EFAULT;
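These compat layout changes mirror the new request_fd member of the native structs, so 32-bit userspace can address controls stored in a request. A hedged userspace sketch: the file descriptors are assumed to come from open() on the video node and MEDIA_IOC_REQUEST_ALLOC on the media node, and the MPEG-2 payload struct is assumed to be available to userspace from the stateless control definitions introduced in the v4l2-ctrls.c changes below:

static int example_set_quant_in_request(int video_fd, int request_fd)
{
	struct v4l2_ctrl_mpeg2_quantization quant = {};	/* matrices filled elsewhere */
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
		.size = sizeof(quant),
		.ptr = &quant,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_REQUEST_VAL,	/* act on the request value */
		.request_fd = request_fd,		/* from MEDIA_IOC_REQUEST_ALLOC */
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}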
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 599c1cbff3b9..6e37950292cd 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -37,8 +37,8 @@
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
struct v4l2_ctrl_ref *mref;
- /* The control corresponding to the v4l2_ext_control ID field. */
- struct v4l2_ctrl *ctrl;
+ /* The control ref corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl_ref *ref;
/* v4l2_ext_control index of the next control belonging to the
same cluster, or 0 if there isn't any. */
u32 next;
@@ -844,6 +844,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices";
/* VPX controls */
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
@@ -1292,6 +1294,12 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_RDS_TX_ALT_FREQS:
*type = V4L2_CTRL_TYPE_U32;
break;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION:
+ *type = V4L2_CTRL_TYPE_MPEG2_QUANTIZATION;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1550,6 +1558,7 @@ static void std_log(const struct v4l2_ctrl *ctrl)
static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
+ struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
size_t len;
u64 offset;
s64 val;
@@ -1612,6 +1621,54 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
return -ERANGE;
return 0;
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ p_mpeg2_slice_params = ptr.p;
+
+ switch (p_mpeg2_slice_params->sequence.chroma_format) {
+ case 1: /* 4:2:0 */
+ case 2: /* 4:2:2 */
+ case 3: /* 4:4:4 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.intra_dc_precision) {
+ case 0: /* 8 bits */
+ case 1: /* 9 bits */
+ case 2: /* 10 bits */
+ case 3: /* 11 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_structure) {
+ case 1: /* interlaced top field */
+ case 2: /* interlaced bottom field */
+ case 3: /* progressive */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_I:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (p_mpeg2_slice_params->backward_ref_index >= VIDEO_MAX_FRAME ||
+ p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
+ return -EINVAL;
+
+ return 0;
+
+ case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
+ return 0;
+
default:
return -EINVAL;
}
@@ -1668,6 +1725,13 @@ static int new_to_user(struct v4l2_ext_control *c,
return ptr_to_user(c, ctrl, ctrl->p_new);
}
+/* Helper function: copy the request value back to the caller */
+static int req_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl_ref *ref)
+{
+ return ptr_to_user(c, ref->ctrl, ref->p_req);
+}
+
/* Helper function: copy the initial control value back to the caller */
static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
@@ -1787,6 +1851,26 @@ static void cur_to_new(struct v4l2_ctrl *ctrl)
ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new);
}
+/* Copy the new value to the request value */
+static void new_to_req(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
+ ref->req = ref;
+}
+
+/* Copy the request value to the new value */
+static void req_to_new(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ if (ref->req)
+ ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
+ else
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
+}
+
/* Return non-zero if one or more of the controls in the cluster has a new
value that differs from the current value. */
static int cluster_changed(struct v4l2_ctrl *master)
@@ -1896,11 +1980,15 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
lockdep_set_class_and_name(hdl->lock, key, name);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
+ INIT_LIST_HEAD(&hdl->requests);
+ INIT_LIST_HEAD(&hdl->requests_queued);
+ hdl->request_is_queued = false;
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
hdl->buckets = kvmalloc_array(hdl->nr_of_buckets,
sizeof(hdl->buckets[0]),
GFP_KERNEL | __GFP_ZERO);
hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ media_request_object_init(&hdl->req_obj);
return hdl->error;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
@@ -1915,6 +2003,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
if (hdl == NULL || hdl->buckets == NULL)
return;
+ if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
+ struct v4l2_ctrl_handler *req, *next_req;
+
+ list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+ media_request_object_unbind(&req->req_obj);
+ media_request_object_put(&req->req_obj);
+ }
+ }
mutex_lock(hdl->lock);
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
@@ -2016,13 +2112,19 @@ EXPORT_SYMBOL(v4l2_ctrl_find);
/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl)
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req)
{
struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl_ref *new_ref;
u32 id = ctrl->id;
u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+ unsigned int size_extra_req = 0;
+
+ if (ctrl_ref)
+ *ctrl_ref = NULL;
/*
* Automatically add the control class if it is not yet present and
@@ -2036,10 +2138,16 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
if (hdl->error)
return hdl->error;
- new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
+ if (allocate_req)
+ size_extra_req = ctrl->elems * ctrl->elem_size;
+ new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
if (!new_ref)
return handler_set_err(hdl, -ENOMEM);
new_ref->ctrl = ctrl;
+ new_ref->from_other_dev = from_other_dev;
+ if (size_extra_req)
+ new_ref->p_req.p = &new_ref[1];
+
if (ctrl->handler == hdl) {
/* By default each control starts in a cluster of its own.
new_ref->ctrl is basically a cluster array with one
@@ -2079,6 +2187,8 @@ insert_in_hash:
/* Insert the control node in the hash */
new_ref->next = hdl->buckets[bucket];
hdl->buckets[bucket] = new_ref;
+ if (ctrl_ref)
+ *ctrl_ref = new_ref;
unlock:
mutex_unlock(hdl->lock);
@@ -2133,6 +2243,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_U32:
elem_size = sizeof(u32);
break;
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2220,7 +2336,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
}
- if (handler_new_ref(hdl, ctrl)) {
+ if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
kvfree(ctrl);
return NULL;
}
@@ -2389,7 +2505,8 @@ EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
/* Add the controls from another handler to our own. */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
- bool (*filter)(const struct v4l2_ctrl *ctrl))
+ bool (*filter)(const struct v4l2_ctrl *ctrl),
+ bool from_other_dev)
{
struct v4l2_ctrl_ref *ref;
int ret = 0;
@@ -2412,7 +2529,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
/* Filter any unwanted controls */
if (filter && !filter(ctrl))
continue;
- ret = handler_new_ref(hdl, ctrl);
+ ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
if (ret)
break;
}
@@ -2511,20 +2628,15 @@ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
}
EXPORT_SYMBOL(v4l2_ctrl_activate);
-/* Grab/ungrab a control.
- Typically used when streaming starts and you want to grab controls,
- preventing the user from changing them.
-
- Just call this and the framework will block any attempts to change
- these controls. */
-void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
{
bool old;
if (ctrl == NULL)
return;
- v4l2_ctrl_lock(ctrl);
+ lockdep_assert_held(ctrl->handler->lock);
+
if (grabbed)
/* set V4L2_CTRL_FLAG_GRABBED */
old = test_and_set_bit(1, &ctrl->flags);
@@ -2533,9 +2645,8 @@ void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
old = test_and_clear_bit(1, &ctrl->flags);
if (old != grabbed)
send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
- v4l2_ctrl_unlock(ctrl);
}
-EXPORT_SYMBOL(v4l2_ctrl_grab);
+EXPORT_SYMBOL(__v4l2_ctrl_grab);
/* Log the control name and value */
static void log_ctrl(const struct v4l2_ctrl *ctrl,
@@ -2722,7 +2833,7 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
qc->id = id;
else
qc->id = ctrl->id;
- strlcpy(qc->name, ctrl->name, sizeof(qc->name));
+ strscpy(qc->name, ctrl->name, sizeof(qc->name));
qc->flags = user_flags(ctrl);
qc->type = ctrl->type;
qc->elem_size = ctrl->elem_size;
@@ -2754,7 +2865,7 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
qc->id = qec.id;
qc->type = qec.type;
qc->flags = qec.flags;
- strlcpy(qc->name, qec.name, sizeof(qc->name));
+ strscpy(qc->name, qec.name, sizeof(qc->name));
switch (qc->type) {
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
@@ -2813,7 +2924,7 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
return -EINVAL;
- strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
} else {
qm->value = ctrl->qmenu_int[i];
}
@@ -2821,6 +2932,148 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
}
EXPORT_SYMBOL(v4l2_querymenu);
+static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_handler *from)
+{
+ struct v4l2_ctrl_ref *ref;
+ int err = 0;
+
+ if (WARN_ON(!hdl || hdl == from))
+ return -EINVAL;
+
+ if (hdl->error)
+ return hdl->error;
+
+ WARN_ON(hdl->lock != &hdl->_lock);
+
+ mutex_lock(from->lock);
+ list_for_each_entry(ref, &from->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl_ref *new_ref;
+
+ /* Skip refs inherited from other devices */
+ if (ref->from_other_dev)
+ continue;
+ /* And buttons */
+ if (ctrl->type == V4L2_CTRL_TYPE_BUTTON)
+ continue;
+ err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
+ if (err)
+ break;
+ }
+ mutex_unlock(from->lock);
+ return err;
+}
+
+static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+ struct v4l2_ctrl_handler *prev_hdl = NULL;
+ struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+
+ if (list_empty(&main_hdl->requests_queued))
+ goto queue;
+
+ prev_hdl = list_last_entry(&main_hdl->requests_queued,
+ struct v4l2_ctrl_handler, requests_queued);
+ /*
+ * Note: prev_hdl and hdl must contain the same list of control
+ * references, so if any differences are detected then that is a
+ * driver bug and the WARN_ON is triggered.
+ */
+ mutex_lock(prev_hdl->lock);
+ ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
+ struct v4l2_ctrl_ref, node);
+ list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
+ if (ref_ctrl->req)
+ continue;
+ while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
+ /* Should never happen, but just in case... */
+ if (list_is_last(&ref_ctrl_prev->node,
+ &prev_hdl->ctrl_refs))
+ break;
+ ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
+ }
+ if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
+ break;
+ ref_ctrl->req = ref_ctrl_prev->req;
+ }
+ mutex_unlock(prev_hdl->lock);
+queue:
+ list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ hdl->request_is_queued = true;
+}
+
+static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_del_init(&hdl->requests);
+ if (hdl->request_is_queued) {
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ }
+}
+
+static void v4l2_ctrl_request_release(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+}
+
+static const struct media_request_object_ops req_ops = {
+ .queue = v4l2_ctrl_request_queue,
+ .unbind = v4l2_ctrl_request_unbind,
+ .release = v4l2_ctrl_request_release,
+};
+
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent)
+{
+ struct media_request_object *obj;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return NULL;
+
+ obj = media_request_object_find(req, &req_ops, parent);
+ if (obj)
+ return container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
+
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return (ref && ref->req == ref) ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
+
+static int v4l2_ctrl_request_bind(struct media_request *req,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *from)
+{
+ int ret;
+
+ ret = v4l2_ctrl_request_clone(hdl, from);
+
+ if (!ret) {
+ ret = media_request_object_bind(req, &req_ops,
+ from, false, &hdl->req_obj);
+ if (!ret)
+ list_add_tail(&hdl->requests, &from->requests);
+ }
+ return ret;
+}
/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
@@ -2882,6 +3135,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
if (cs->which &&
cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
+ cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
V4L2_CTRL_ID2WHICH(id) != cs->which)
return -EINVAL;
@@ -2892,6 +3146,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
ref = find_ref_lock(hdl, id);
if (ref == NULL)
return -EINVAL;
+ h->ref = ref;
ctrl = ref->ctrl;
if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -2914,7 +3169,6 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
}
/* Store the ref to the master control of the cluster */
h->mref = ref;
- h->ctrl = ctrl;
/* Initially set next to 0, meaning that there is no other
control in this helper array belonging to the same
cluster */
@@ -2961,15 +3215,15 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
whether there are any controls at all. */
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
- if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL)
+ if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
+ which == V4L2_CTRL_WHICH_REQUEST_VAL)
return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
-
-
/* Get extended controls. Allocates the helpers array if needed. */
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -2999,7 +3253,7 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++)
- if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
ret = -EACCES;
for (i = 0; !ret && i < cs->count; i++) {
@@ -3033,8 +3287,12 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
u32 idx = i;
do {
- ret = ctrl_to_user(cs->controls + idx,
- helpers[idx].ctrl);
+ if (helpers[idx].ref->req)
+ ret = req_to_user(cs->controls + idx,
+ helpers[idx].ref->req);
+ else
+ ret = ctrl_to_user(cs->controls + idx,
+ helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
}
@@ -3045,6 +3303,91 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
kvfree(helpers);
return ret;
}
+
+static struct media_request_object *
+v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
+ struct media_request *req, bool set)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *new_hdl;
+ int ret;
+
+ if (IS_ERR(req))
+ return ERR_CAST(req);
+
+ if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ return ERR_PTR(-EBUSY);
+
+ obj = media_request_object_find(req, &req_ops, hdl);
+ if (obj)
+ return obj;
+ if (!set)
+ return ERR_PTR(-ENOENT);
+
+ new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
+ if (!new_hdl)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &new_hdl->req_obj;
+ ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
+ if (ret) {
+ kfree(new_hdl);
+
+ return ERR_PTR(ret);
+ }
+
+ media_request_object_get(obj);
+ return obj;
+}
+
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ media_request_put(req);
+ return -EACCES;
+ }
+
+ ret = media_request_lock_for_access(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, false);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_access(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ }
+
+ ret = v4l2_g_ext_ctrls_common(hdl, cs);
+
+ if (obj) {
+ media_request_unlock_for_access(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ }
+ return ret;
+}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
/* Helper function to get a single control */
@@ -3186,7 +3529,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
cs->error_idx = cs->count;
for (i = 0; i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
union v4l2_ctrl_ptr p_new;
cs->error_idx = i;
@@ -3233,9 +3576,9 @@ static void update_from_auto_cluster(struct v4l2_ctrl *master)
}
/* Try or try-and-set controls */
-static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- bool set)
+static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs, bool set)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -3298,7 +3641,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
do {
/* Check if the auto control is part of the
list, and remember the new value. */
- if (helpers[tmp_idx].ctrl == master)
+ if (helpers[tmp_idx].ref->ctrl == master)
new_auto_val = cs->controls[tmp_idx].value;
tmp_idx = helpers[tmp_idx].next;
} while (tmp_idx);
@@ -3311,7 +3654,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
/* Copy the new caller-supplied control values.
user_to_new() sets 'is_new' to 1. */
do {
- struct v4l2_ctrl *ctrl = helpers[idx].ctrl;
+ struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
ret = user_to_new(cs->controls + idx, ctrl);
if (!ret && ctrl->is_ptr)
@@ -3320,14 +3663,23 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
} while (!ret && idx);
if (!ret)
- ret = try_or_set_cluster(fh, master, set, 0);
+ ret = try_or_set_cluster(fh, master,
+ !hdl->req_obj.req && set, 0);
+ if (!ret && hdl->req_obj.req && set) {
+ for (j = 0; j < master->ncontrols; j++) {
+ struct v4l2_ctrl_ref *ref =
+ find_ref(hdl, master->cluster[j]->id);
+
+ new_to_req(ref);
+ }
+ }
/* Copy the new values back to userspace. */
if (!ret) {
idx = i;
do {
ret = new_to_user(cs->controls + idx,
- helpers[idx].ctrl);
+ helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
}
@@ -3339,16 +3691,60 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return ret;
}
-int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+static int try_set_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
{
- return try_set_ext_ctrls(NULL, hdl, cs, false);
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = media_request_lock_for_update(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, set);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_update(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ }
+
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, set);
+
+ if (obj) {
+ media_request_unlock_for_update(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ }
+
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(NULL, hdl, mdev, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);
int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs)
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(fh, hdl, cs, true);
+ return try_set_ext_ctrls(fh, hdl, mdev, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
@@ -3442,11 +3838,167 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
/* It's a driver bug if this happens. */
WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING);
- strlcpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
+ strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return;
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ unsigned int i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ref->req = ref;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ call_op(master, g_volatile_ctrl);
+ new_to_req(ref);
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+ if (ref->req == ref)
+ continue;
+
+ v4l2_ctrl_lock(ctrl);
+ if (ref->req)
+ ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
+ else
+ ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
+ v4l2_ctrl_unlock(ctrl);
+ }
+
+ WARN_ON(!hdl->request_is_queued);
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ media_request_object_complete(obj);
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_complete);
+
+void v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return;
+ if (obj->completed) {
+ media_request_object_put(obj);
+ return;
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node)
+ ref->req_done = false;
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ bool have_new_data = false;
+ int i;
+
+ /*
+ * Skip if this control was already handled by a cluster.
+ * Skip button controls and read-only controls.
+ */
+ if (ref->req_done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ v4l2_ctrl_lock(master);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ if (r->req && r == r->req) {
+ have_new_data = true;
+ break;
+ }
+ }
+ }
+ if (!have_new_data) {
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ req_to_new(r);
+ master->cluster[i]->is_new = 1;
+ r->req_done = true;
+ }
+ }
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ s32 new_auto_val = *master->p_new.p_s32;
+
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ try_or_set_cluster(NULL, master, true, 0);
+
+ v4l2_ctrl_unlock(master);
+ }
+
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_setup);
+
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
{
if (ctrl == NULL)
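A rough sketch of how a stateless decoder driver would use the request helpers added above; example_ctx, example_request_to_ctx() and the hardware steps are hypothetical, while the v4l2_ctrl_request_* calls and the control ID come from this patch:

/* req_validate: make sure the request carries the mandatory control */
static int example_request_validate(struct media_request *req)
{
	struct example_ctx *ctx = example_request_to_ctx(req);
	struct v4l2_ctrl_handler *hdl;
	struct v4l2_ctrl *ctrl;

	/* NULL means the request contains no controls for this handler */
	hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
	if (!hdl)
		return -ENOENT;

	ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl,
			V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
	media_request_object_put(&hdl->req_obj);	/* drop the find reference */

	/* a real driver would also run the core buffer validation here */
	return ctrl ? 0 : -ENOENT;
}

/* device_run: apply the request's controls, program, then mark them final */
static void example_device_run(struct example_ctx *ctx, struct media_request *req)
{
	v4l2_ctrl_request_setup(req, &ctx->ctrl_hdl);

	/* ... read the control values and program the hardware ... */

	v4l2_ctrl_request_complete(req, &ctx->ctrl_hdl);

	/* ... trigger the job ... */
}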
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 69e775930fc4..feb749aaaa42 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -444,8 +444,22 @@ static int v4l2_release(struct inode *inode, struct file *filp)
struct video_device *vdev = video_devdata(filp);
int ret = 0;
- if (vdev->fops->release)
- ret = vdev->fops->release(filp);
+ /*
+ * We need to serialize the release() with queueing new requests.
+ * The release() may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (vdev->fops->release) {
+ if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
+ mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ ret = vdev->fops->release(filp);
+ mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ } else {
+ ret = vdev->fops->release(filp);
+ }
+ }
+
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: release\n",
video_device_node_name(vdev));
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 3940e55c72f1..df0ac38c4050 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -178,7 +178,8 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
sd->v4l2_dev = v4l2_dev;
/* This just returns 0 if either of the two args is NULL */
- err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler,
+ NULL, true);
if (err)
goto error_module;
@@ -245,7 +246,7 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
}
video_set_drvdata(vdev, sd);
- strlcpy(vdev->name, sd->name, sizeof(vdev->name));
+ strscpy(vdev->name, sd->name, sizeof(vdev->name));
vdev->v4l2_dev = v4l2_dev;
vdev->fops = &v4l2_subdev_fops;
vdev->release = v4l2_device_release_subdev_node;
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index c81faea96fba..4f23e939ead0 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -15,6 +15,7 @@
#include <media/v4l2-dv-timings.h>
#include <linux/math64.h>
#include <linux/hdmi.h>
+#include <media/cec.h>
MODULE_AUTHOR("Hans Verkuil");
MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
@@ -373,6 +374,45 @@ struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
}
EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio);
+/** v4l2_calc_timeperframe - helper function to calculate timeperframe based
+ * on v4l2_dv_timings fields.
+ * @t: Timings for the video mode.
+ *
+ * Calculates the expected timeperframe using the pixel clock value and
+ * horizontal/vertical measures. This means that the v4l2_dv_timings structure
+ * must be correctly and fully filled.
+ */
+struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ struct v4l2_fract fps_fract = { 1, 1 };
+ unsigned long n, d;
+ u32 htot, vtot, fps;
+ u64 pclk;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return fps_fract;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ pclk = bt->pixelclock;
+
+ if ((bt->flags & V4L2_DV_FL_CAN_DETECT_REDUCED_FPS) &&
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS))
+ pclk = div_u64(pclk * 1000ULL, 1001);
+
+ fps = (htot * vtot) > 0 ? div_u64((100 * pclk), (htot * vtot)) : 0;
+ if (!fps)
+ return fps_fract;
+
+ rational_best_approximation(fps, 100, fps, 100, &n, &d);
+
+ fps_fract.numerator = d;
+ fps_fract.denominator = n;
+ return fps_fract;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe);
+
/*
* CVT defines
* Based on Coordinated Video Timings Standard
@@ -837,9 +877,9 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
switch (avi->colorimetry) {
case HDMI_COLORIMETRY_EXTENDED:
switch (avi->extended_colorimetry) {
- case HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB:
- c.colorspace = V4L2_COLORSPACE_ADOBERGB;
- c.xfer_func = V4L2_XFER_FUNC_ADOBERGB;
+ case HDMI_EXTENDED_COLORIMETRY_OPRGB:
+ c.colorspace = V4L2_COLORSPACE_OPRGB;
+ c.xfer_func = V4L2_XFER_FUNC_OPRGB;
break;
case HDMI_EXTENDED_COLORIMETRY_BT2020:
c.colorspace = V4L2_COLORSPACE_BT2020;
@@ -908,10 +948,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
c.ycbcr_enc = V4L2_YCBCR_ENC_601;
c.xfer_func = V4L2_XFER_FUNC_SRGB;
break;
- case HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601:
- c.colorspace = V4L2_COLORSPACE_ADOBERGB;
+ case HDMI_EXTENDED_COLORIMETRY_OPYCC_601:
+ c.colorspace = V4L2_COLORSPACE_OPRGB;
c.ycbcr_enc = V4L2_YCBCR_ENC_601;
- c.xfer_func = V4L2_XFER_FUNC_ADOBERGB;
+ c.xfer_func = V4L2_XFER_FUNC_OPRGB;
break;
case HDMI_EXTENDED_COLORIMETRY_BT2020:
c.colorspace = V4L2_COLORSPACE_BT2020;
@@ -942,3 +982,153 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
return c;
}
EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
+
+/**
+ * v4l2_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @offset: If not %NULL then the location of the physical address
+ * bytes in the EDID will be returned here. This is set to 0
+ * if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+ if (offset)
+ *offset = loc;
+ if (loc == 0)
+ return CEC_PHYS_ADDR_INVALID;
+ return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
+
+/**
+ * v4l2_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @phys_addr: the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+ u8 sum = 0;
+ unsigned int i;
+
+ if (loc == 0)
+ return;
+ edid[loc] = phys_addr >> 8;
+ edid[loc + 1] = phys_addr & 0xff;
+ loc &= ~0x7f;
+
+ /* update the checksum */
+ for (i = loc; i < loc + 127; i++)
+ sum += edid[i];
+ edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
+
+/**
+ * v4l2_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr: the physical address of the parent
+ * @input: the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ /* Check if input is sane */
+ if (WARN_ON(input == 0 || input > 0xf))
+ return CEC_PHYS_ADDR_INVALID;
+
+ if (phys_addr == 0)
+ return input << 12;
+
+ if ((phys_addr & 0x0fff) == 0)
+ return phys_addr | (input << 8);
+
+ if ((phys_addr & 0x00ff) == 0)
+ return phys_addr | (input << 4);
+
+ if ((phys_addr & 0x000f) == 0)
+ return phys_addr | input;
+
+ /*
+ * All nibbles are used so no valid physical addresses can be assigned
+ * to the input.
+ */
+ return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
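
Together with v4l2_set_edid_phys_addr() above, a receiver that exposes EDIDs
on several HDMI inputs could derive and patch the per-input physical address;
a sketch with assumed field names:

static void my_fill_input_edid_pa(struct my_state *state, unsigned int input)
{
	/* inputs are numbered 1..15 for v4l2_phys_addr_for_input() */
	u16 pa = v4l2_phys_addr_for_input(state->sink_phys_addr, input + 1);

	v4l2_set_edid_phys_addr(state->input_edid[input],
				state->input_edid_size, pa);
}
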
+
+/**
+ * v4l2_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr: the physical address to validate
+ * @parent: if not %NULL, then this is filled with the parent's PA.
+ * @port: if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is returned in @parent and the input port is returned in
+ * @port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ int i;
+
+ if (parent)
+ *parent = phys_addr;
+ if (port)
+ *port = 0;
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return 0;
+ for (i = 0; i < 16; i += 4)
+ if (phys_addr & (0xf << i))
+ break;
+ if (i == 16)
+ return 0;
+ if (parent)
+ *parent = phys_addr & (0xfff0 << i);
+ if (port)
+ *port = (phys_addr >> i) & 0xf;
+ for (i += 4; i < 16; i += 4)
+ if ((phys_addr & (0xf << i)) == 0)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
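
Likewise, an HDMI transmitter could validate the physical address read from
the sink's EDID before programming its CEC adapter (sketch only, the state
fields are assumed):

static void my_handle_sink_edid(struct my_state *state,
				const u8 *edid, unsigned int size)
{
	u16 pa = v4l2_get_edid_phys_addr(edid, size, NULL);

	if (v4l2_phys_addr_validate(pa, NULL, NULL))
		pa = CEC_PHYS_ADDR_INVALID;

	cec_s_phys_addr(state->cec_adap, pa, false);
}
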
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 127fe6eb91d9..a3ef1f50a4b3 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
- /*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
- if (!sev->elems)
- return;
-
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
+ int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
+ sev->elems = elems;
+
+ mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (!found_ev)
- list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
+ /* Already listening */
kvfree(sev);
- return 0; /* Already listening */
+ goto out_unlock;
}
if (sev->ops && sev->ops->add) {
- int ret = sev->ops->add(sev, elems);
+ ret = sev->ops->add(sev, elems);
if (ret) {
- sev->ops = NULL;
- v4l2_event_unsubscribe(fh, sub);
- return ret;
+ kvfree(sev);
+ goto out_unlock;
}
}
- /* Mark as ready for use */
- sev->elems = elems;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- return 0;
+out_unlock:
+ mutex_unlock(&fh->subscribe_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
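
For context, v4l2_event_subscribe() is normally reached from a driver's
.vidioc_subscribe_event handler, so concurrent calls like the sketch below
(illustrative, not part of this patch) can race with each other and with
unsubscribe; that is what the new fh->subscribe_lock serializes.

static int my_subscribe_event(struct v4l2_fh *fh,
			      const struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_SOURCE_CHANGE:
		return v4l2_src_change_event_subscribe(fh, sub);
	case V4L2_EVENT_CTRL:
		return v4l2_ctrl_subscribe_event(fh, sub);
	default:
		/* driver-private event: queue up to 4, no subscribe ops */
		return v4l2_event_subscribe(fh, sub, 4, NULL);
	}
}
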
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
+ mutex_unlock(&fh->subscribe_lock);
+
kvfree(sev);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 3895999bf880..c91a7bd3ecfc 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
return;
v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
index 215b4804ada2..1697932af5ea 100644
--- a/drivers/media/v4l2-core/v4l2-flash-led-class.c
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -640,7 +640,7 @@ static struct v4l2_flash *__v4l2_flash_init(
v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
sd->internal_ops = &v4l2_flash_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- strlcpy(sd->name, config->dev_name, sizeof(sd->name));
+ strscpy(sd->name, config->dev_name, sizeof(sd->name));
ret = media_entity_pads_init(&sd->entity, 0, NULL);
if (ret < 0)
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 169bdbb1f61a..218f0da0ce76 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -36,194 +36,469 @@ enum v4l2_fwnode_bus_type {
V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
V4L2_FWNODE_BUS_TYPE_CSI1,
V4L2_FWNODE_BUS_TYPE_CCP2,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
+ V4L2_FWNODE_BUS_TYPE_PARALLEL,
+ V4L2_FWNODE_BUS_TYPE_BT656,
NR_OF_V4L2_FWNODE_BUS_TYPE,
};
+static const struct v4l2_fwnode_bus_conv {
+ enum v4l2_fwnode_bus_type fwnode_bus_type;
+ enum v4l2_mbus_type mbus_type;
+ const char *name;
+} busses[] = {
+ {
+ V4L2_FWNODE_BUS_TYPE_GUESS,
+ V4L2_MBUS_UNKNOWN,
+ "not specified",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
+ V4L2_MBUS_CSI2_CPHY,
+ "MIPI CSI-2 C-PHY",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI1,
+ V4L2_MBUS_CSI1,
+ "MIPI CSI-1",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CCP2,
+ V4L2_MBUS_CCP2,
+ "compact camera port 2",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
+ V4L2_MBUS_CSI2_DPHY,
+ "MIPI CSI-2 D-PHY",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_PARALLEL,
+ V4L2_MBUS_PARALLEL,
+ "parallel",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_BT656,
+ V4L2_MBUS_BT656,
+ "Bt.656",
+ }
+};
+
+static const struct v4l2_fwnode_bus_conv *
+get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(busses); i++)
+ if (busses[i].fwnode_bus_type == type)
+ return &busses[i];
+
+ return NULL;
+}
+
+static enum v4l2_mbus_type
+v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
+
+ return conv ? conv->mbus_type : V4L2_MBUS_UNKNOWN;
+}
+
+static const char *
+v4l2_fwnode_bus_type_to_string(enum v4l2_fwnode_bus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
+
+ return conv ? conv->name : "not found";
+}
+
+static const struct v4l2_fwnode_bus_conv *
+get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(busses); i++)
+ if (busses[i].mbus_type == type)
+ return &busses[i];
+
+ return NULL;
+}
+
+static const char *
+v4l2_fwnode_mbus_type_to_string(enum v4l2_mbus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_mbus(type);
+
+ return conv ? conv->name : "not found";
+}
+
static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
- struct v4l2_fwnode_endpoint *vep)
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
{
struct v4l2_fwnode_bus_mipi_csi2 *bus = &vep->bus.mipi_csi2;
- bool have_clk_lane = false;
+ bool have_clk_lane = false, have_data_lanes = false,
+ have_lane_polarities = false;
unsigned int flags = 0, lanes_used = 0;
+ u32 array[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES];
+ u32 clock_lane = 0;
+ unsigned int num_data_lanes = 0;
+ bool use_default_lane_mapping = false;
unsigned int i;
u32 v;
int rval;
+ if (bus_type == V4L2_MBUS_CSI2_DPHY ||
+ bus_type == V4L2_MBUS_CSI2_CPHY) {
+ use_default_lane_mapping = true;
+
+ num_data_lanes = min_t(u32, bus->num_data_lanes,
+ V4L2_FWNODE_CSI2_MAX_DATA_LANES);
+
+ clock_lane = bus->clock_lane;
+ if (clock_lane)
+ use_default_lane_mapping = false;
+
+ for (i = 0; i < num_data_lanes; i++) {
+ array[i] = bus->data_lanes[i];
+ if (array[i])
+ use_default_lane_mapping = false;
+ }
+
+ if (use_default_lane_mapping)
+ pr_debug("using default lane mapping\n");
+ }
+
rval = fwnode_property_read_u32_array(fwnode, "data-lanes", NULL, 0);
if (rval > 0) {
- u32 array[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES];
-
- bus->num_data_lanes =
+ num_data_lanes =
min_t(int, V4L2_FWNODE_CSI2_MAX_DATA_LANES, rval);
fwnode_property_read_u32_array(fwnode, "data-lanes", array,
- bus->num_data_lanes);
+ num_data_lanes);
- for (i = 0; i < bus->num_data_lanes; i++) {
- if (lanes_used & BIT(array[i]))
- pr_warn("duplicated lane %u in data-lanes\n",
- array[i]);
- lanes_used |= BIT(array[i]);
+ have_data_lanes = true;
+ }
- bus->data_lanes[i] = array[i];
+ for (i = 0; i < num_data_lanes; i++) {
+ if (lanes_used & BIT(array[i])) {
+ if (have_data_lanes || !use_default_lane_mapping)
+ pr_warn("duplicated lane %u in data-lanes, using defaults\n",
+ array[i]);
+ use_default_lane_mapping = true;
}
+ lanes_used |= BIT(array[i]);
- rval = fwnode_property_read_u32_array(fwnode,
- "lane-polarities", NULL,
- 0);
- if (rval > 0) {
- if (rval != 1 + bus->num_data_lanes /* clock+data */) {
- pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n",
- 1 + bus->num_data_lanes, rval);
- return -EINVAL;
- }
-
- fwnode_property_read_u32_array(fwnode,
- "lane-polarities", array,
- 1 + bus->num_data_lanes);
+ if (have_data_lanes)
+ pr_debug("lane %u position %u\n", i, array[i]);
+ }
- for (i = 0; i < 1 + bus->num_data_lanes; i++)
- bus->lane_polarities[i] = array[i];
+ rval = fwnode_property_read_u32_array(fwnode, "lane-polarities", NULL,
+ 0);
+ if (rval > 0) {
+ if (rval != 1 + num_data_lanes /* clock+data */) {
+ pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n",
+ 1 + num_data_lanes, rval);
+ return -EINVAL;
}
+ have_lane_polarities = true;
}
if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
- if (lanes_used & BIT(v))
- pr_warn("duplicated lane %u in clock-lanes\n", v);
- lanes_used |= BIT(v);
-
- bus->clock_lane = v;
+ clock_lane = v;
+ pr_debug("clock lane position %u\n", v);
have_clk_lane = true;
}
- if (fwnode_property_present(fwnode, "clock-noncontinuous"))
+ if (lanes_used & BIT(clock_lane)) {
+ if (have_clk_lane || !use_default_lane_mapping)
+ pr_warn("duplicated lane %u in clock-lanes, using defaults\n",
+ v);
+ use_default_lane_mapping = true;
+ }
+
+ if (fwnode_property_present(fwnode, "clock-noncontinuous")) {
flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
- else if (have_clk_lane || bus->num_data_lanes > 0)
+ pr_debug("non-continuous clock\n");
+ } else {
flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+ }
- bus->flags = flags;
- vep->bus_type = V4L2_MBUS_CSI2;
+ if (bus_type == V4L2_MBUS_CSI2_DPHY ||
+ bus_type == V4L2_MBUS_CSI2_CPHY || lanes_used ||
+ have_clk_lane || (flags & ~V4L2_MBUS_CSI2_CONTINUOUS_CLOCK)) {
+ bus->flags = flags;
+ if (bus_type == V4L2_MBUS_UNKNOWN)
+ vep->bus_type = V4L2_MBUS_CSI2_DPHY;
+ bus->num_data_lanes = num_data_lanes;
+
+ if (use_default_lane_mapping) {
+ bus->clock_lane = 0;
+ for (i = 0; i < num_data_lanes; i++)
+ bus->data_lanes[i] = 1 + i;
+ } else {
+ bus->clock_lane = clock_lane;
+ for (i = 0; i < num_data_lanes; i++)
+ bus->data_lanes[i] = array[i];
+ }
+
+ if (have_lane_polarities) {
+ fwnode_property_read_u32_array(fwnode,
+ "lane-polarities", array,
+ 1 + num_data_lanes);
+
+ for (i = 0; i < 1 + num_data_lanes; i++) {
+ bus->lane_polarities[i] = array[i];
+				pr_debug("lane %u polarity %sinverted\n",
+ i, array[i] ? "" : "not ");
+ }
+ } else {
+ pr_debug("no lane polarities defined, assuming not inverted\n");
+ }
+ }
return 0;
}
-static void v4l2_fwnode_endpoint_parse_parallel_bus(
- struct fwnode_handle *fwnode, struct v4l2_fwnode_endpoint *vep)
+#define PARALLEL_MBUS_FLAGS (V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_FIELD_EVEN_HIGH | \
+ V4L2_MBUS_FIELD_EVEN_LOW)
+
+static void
+v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
{
struct v4l2_fwnode_bus_parallel *bus = &vep->bus.parallel;
unsigned int flags = 0;
u32 v;
- if (!fwnode_property_read_u32(fwnode, "hsync-active", &v))
+ if (bus_type == V4L2_MBUS_PARALLEL || bus_type == V4L2_MBUS_BT656)
+ flags = bus->flags;
+
+ if (!fwnode_property_read_u32(fwnode, "hsync-active", &v)) {
+ flags &= ~(V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ pr_debug("hsync-active %s\n", v ? "high" : "low");
+ }
- if (!fwnode_property_read_u32(fwnode, "vsync-active", &v))
+ if (!fwnode_property_read_u32(fwnode, "vsync-active", &v)) {
+ flags &= ~(V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ pr_debug("vsync-active %s\n", v ? "high" : "low");
+ }
- if (!fwnode_property_read_u32(fwnode, "field-even-active", &v))
+ if (!fwnode_property_read_u32(fwnode, "field-even-active", &v)) {
+ flags &= ~(V4L2_MBUS_FIELD_EVEN_HIGH |
+ V4L2_MBUS_FIELD_EVEN_LOW);
flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
V4L2_MBUS_FIELD_EVEN_LOW;
- if (flags)
- vep->bus_type = V4L2_MBUS_PARALLEL;
- else
- vep->bus_type = V4L2_MBUS_BT656;
+ pr_debug("field-even-active %s\n", v ? "high" : "low");
+ }
- if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v))
+ if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v)) {
+ flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING);
flags |= v ? V4L2_MBUS_PCLK_SAMPLE_RISING :
V4L2_MBUS_PCLK_SAMPLE_FALLING;
+		pr_debug("pclk-sample %s\n", v ? "rising" : "falling");
+ }
- if (!fwnode_property_read_u32(fwnode, "data-active", &v))
+ if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
+		flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
+			   V4L2_MBUS_DATA_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
V4L2_MBUS_DATA_ACTIVE_LOW;
+ pr_debug("data-active %s\n", v ? "high" : "low");
+ }
- if (fwnode_property_present(fwnode, "slave-mode"))
+ if (fwnode_property_present(fwnode, "slave-mode")) {
+ pr_debug("slave mode\n");
+ flags &= ~V4L2_MBUS_MASTER;
flags |= V4L2_MBUS_SLAVE;
- else
+ } else {
+ flags &= ~V4L2_MBUS_SLAVE;
flags |= V4L2_MBUS_MASTER;
+ }
- if (!fwnode_property_read_u32(fwnode, "bus-width", &v))
+ if (!fwnode_property_read_u32(fwnode, "bus-width", &v)) {
bus->bus_width = v;
+ pr_debug("bus-width %u\n", v);
+ }
- if (!fwnode_property_read_u32(fwnode, "data-shift", &v))
+ if (!fwnode_property_read_u32(fwnode, "data-shift", &v)) {
bus->data_shift = v;
+ pr_debug("data-shift %u\n", v);
+ }
- if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v))
+ if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v)) {
+ flags &= ~(V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH |
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+ pr_debug("sync-on-green-active %s\n", v ? "high" : "low");
+ }
- if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v))
+ if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v)) {
+ flags &= ~(V4L2_MBUS_DATA_ENABLE_HIGH |
+ V4L2_MBUS_DATA_ENABLE_LOW);
flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH :
V4L2_MBUS_DATA_ENABLE_LOW;
+ pr_debug("data-enable-active %s\n", v ? "high" : "low");
+ }
- bus->flags = flags;
-
+ switch (bus_type) {
+ default:
+ bus->flags = flags;
+ if (flags & PARALLEL_MBUS_FLAGS)
+ vep->bus_type = V4L2_MBUS_PARALLEL;
+ else
+ vep->bus_type = V4L2_MBUS_BT656;
+ break;
+ case V4L2_MBUS_PARALLEL:
+ vep->bus_type = V4L2_MBUS_PARALLEL;
+ bus->flags = flags;
+ break;
+ case V4L2_MBUS_BT656:
+ vep->bus_type = V4L2_MBUS_BT656;
+ bus->flags = flags & ~PARALLEL_MBUS_FLAGS;
+ break;
+ }
}
static void
v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep,
- u32 bus_type)
+ enum v4l2_mbus_type bus_type)
{
struct v4l2_fwnode_bus_mipi_csi1 *bus = &vep->bus.mipi_csi1;
u32 v;
- if (!fwnode_property_read_u32(fwnode, "clock-inv", &v))
+ if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) {
bus->clock_inv = v;
+ pr_debug("clock-inv %u\n", v);
+ }
- if (!fwnode_property_read_u32(fwnode, "strobe", &v))
+ if (!fwnode_property_read_u32(fwnode, "strobe", &v)) {
bus->strobe = v;
+ pr_debug("strobe %u\n", v);
+ }
- if (!fwnode_property_read_u32(fwnode, "data-lanes", &v))
+ if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) {
bus->data_lane = v;
+ pr_debug("data-lanes %u\n", v);
+ }
- if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v))
+ if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
bus->clock_lane = v;
+ pr_debug("clock-lanes %u\n", v);
+ }
- if (bus_type == V4L2_FWNODE_BUS_TYPE_CCP2)
+ if (bus_type == V4L2_MBUS_CCP2)
vep->bus_type = V4L2_MBUS_CCP2;
else
vep->bus_type = V4L2_MBUS_CSI1;
}
-int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
- struct v4l2_fwnode_endpoint *vep)
+static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
{
- u32 bus_type = 0;
+ u32 bus_type = V4L2_FWNODE_BUS_TYPE_GUESS;
+ enum v4l2_mbus_type mbus_type;
int rval;
- fwnode_graph_parse_endpoint(fwnode, &vep->base);
+ if (vep->bus_type == V4L2_MBUS_UNKNOWN) {
+		/* Zero fields from the bus union to the end */
+ memset(&vep->bus, 0,
+ sizeof(*vep) - offsetof(typeof(*vep), bus));
+ }
- /* Zero fields from bus_type to until the end */
- memset(&vep->bus_type, 0, sizeof(*vep) -
- offsetof(typeof(*vep), bus_type));
+ pr_debug("===== begin V4L2 endpoint properties\n");
+
+ /*
+ * Zero the fwnode graph endpoint memory in case we don't end up parsing
+ * the endpoint.
+ */
+ memset(&vep->base, 0, sizeof(vep->base));
fwnode_property_read_u32(fwnode, "bus-type", &bus_type);
+ pr_debug("fwnode video bus type %s (%u), mbus type %s (%u)\n",
+ v4l2_fwnode_bus_type_to_string(bus_type), bus_type,
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type),
+ vep->bus_type);
+ mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type);
+
+ if (vep->bus_type != V4L2_MBUS_UNKNOWN) {
+ if (mbus_type != V4L2_MBUS_UNKNOWN &&
+ vep->bus_type != mbus_type) {
+ pr_debug("expecting bus type %s\n",
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type));
+ return -ENXIO;
+ }
+ } else {
+ vep->bus_type = mbus_type;
+ }
- switch (bus_type) {
- case V4L2_FWNODE_BUS_TYPE_GUESS:
- rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep);
+ switch (vep->bus_type) {
+ case V4L2_MBUS_UNKNOWN:
+ rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
+ V4L2_MBUS_UNKNOWN);
if (rval)
return rval;
- /*
- * Parse the parallel video bus properties only if none
- * of the MIPI CSI-2 specific properties were found.
- */
- if (vep->bus.mipi_csi2.flags == 0)
- v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep);
-
- return 0;
- case V4L2_FWNODE_BUS_TYPE_CCP2:
- case V4L2_FWNODE_BUS_TYPE_CSI1:
- v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, bus_type);
-
- return 0;
+
+ if (vep->bus_type == V4L2_MBUS_UNKNOWN)
+ v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
+ V4L2_MBUS_UNKNOWN);
+
+ pr_debug("assuming media bus type %s (%u)\n",
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type),
+ vep->bus_type);
+
+ break;
+ case V4L2_MBUS_CCP2:
+ case V4L2_MBUS_CSI1:
+ v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, vep->bus_type);
+
+ break;
+ case V4L2_MBUS_CSI2_DPHY:
+ case V4L2_MBUS_CSI2_CPHY:
+ rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
+ vep->bus_type);
+ if (rval)
+ return rval;
+
+ break;
+ case V4L2_MBUS_PARALLEL:
+ case V4L2_MBUS_BT656:
+ v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
+ vep->bus_type);
+
+ break;
default:
- pr_warn("unsupported bus type %u\n", bus_type);
+ pr_warn("unsupported bus type %u\n", mbus_type);
return -EINVAL;
}
+
+ fwnode_graph_parse_endpoint(fwnode, &vep->base);
+
+ return 0;
+}
+
+int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ int ret;
+
+ ret = __v4l2_fwnode_endpoint_parse(fwnode, vep);
+
+ pr_debug("===== end V4L2 endpoint properties\n");
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
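
With the reworked signature a caller can pre-seed vep.bus_type to enforce the
bus it supports; a sketch (ep is an endpoint fwnode already obtained by the
caller, the function name is made up):

static int my_parse_csi2_ep(struct fwnode_handle *ep)
{
	struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
	int ret;

	ret = v4l2_fwnode_endpoint_parse(ep, &vep);
	if (ret)
		return ret;	/* -ENXIO if the endpoint describes another bus */

	return vep.bus.mipi_csi2.num_data_lanes;
}
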
@@ -233,49 +508,48 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
return;
kfree(vep->link_frequencies);
- kfree(vep);
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
-struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
- struct fwnode_handle *fwnode)
+int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
{
- struct v4l2_fwnode_endpoint *vep;
int rval;
- vep = kzalloc(sizeof(*vep), GFP_KERNEL);
- if (!vep)
- return ERR_PTR(-ENOMEM);
-
- rval = v4l2_fwnode_endpoint_parse(fwnode, vep);
+ rval = __v4l2_fwnode_endpoint_parse(fwnode, vep);
if (rval < 0)
- goto out_err;
+ return rval;
rval = fwnode_property_read_u64_array(fwnode, "link-frequencies",
NULL, 0);
if (rval > 0) {
+ unsigned int i;
+
vep->link_frequencies =
kmalloc_array(rval, sizeof(*vep->link_frequencies),
GFP_KERNEL);
- if (!vep->link_frequencies) {
- rval = -ENOMEM;
- goto out_err;
- }
+ if (!vep->link_frequencies)
+ return -ENOMEM;
vep->nr_of_link_frequencies = rval;
- rval = fwnode_property_read_u64_array(
- fwnode, "link-frequencies", vep->link_frequencies,
- vep->nr_of_link_frequencies);
- if (rval < 0)
- goto out_err;
+ rval = fwnode_property_read_u64_array(fwnode,
+ "link-frequencies",
+ vep->link_frequencies,
+ vep->nr_of_link_frequencies);
+ if (rval < 0) {
+ v4l2_fwnode_endpoint_free(vep);
+ return rval;
+ }
+
+ for (i = 0; i < vep->nr_of_link_frequencies; i++)
+ pr_info("link-frequencies %u value %llu\n", i,
+ vep->link_frequencies[i]);
}
- return vep;
+ pr_debug("===== end V4L2 endpoint properties\n");
-out_err:
- v4l2_fwnode_endpoint_free(vep);
- return ERR_PTR(rval);
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
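
The alloc_parse/free pairing changes accordingly: the endpoint struct now
lives with the caller and only link_frequencies is heap-allocated, so _free()
is called on a stack variable. A sketch:

static int my_dump_link_freqs(struct fwnode_handle *ep)
{
	struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_UNKNOWN };
	unsigned int i;
	int ret;

	ret = v4l2_fwnode_endpoint_alloc_parse(ep, &vep);
	if (ret)
		return ret;

	for (i = 0; i < vep.nr_of_link_frequencies; i++)
		pr_debug("link frequency %llu\n", vep.link_frequencies[i]);

	v4l2_fwnode_endpoint_free(&vep);	/* frees link_frequencies only */
	return 0;
}
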
@@ -320,43 +594,16 @@ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
-static int v4l2_async_notifier_realloc(struct v4l2_async_notifier *notifier,
- unsigned int max_subdevs)
-{
- struct v4l2_async_subdev **subdevs;
-
- if (max_subdevs <= notifier->max_subdevs)
- return 0;
-
- subdevs = kvmalloc_array(
- max_subdevs, sizeof(*notifier->subdevs),
- GFP_KERNEL | __GFP_ZERO);
- if (!subdevs)
- return -ENOMEM;
-
- if (notifier->subdevs) {
- memcpy(subdevs, notifier->subdevs,
- sizeof(*subdevs) * notifier->num_subdevs);
-
- kvfree(notifier->subdevs);
- }
-
- notifier->subdevs = subdevs;
- notifier->max_subdevs = max_subdevs;
-
- return 0;
-}
-
-static int v4l2_async_notifier_fwnode_parse_endpoint(
- struct device *dev, struct v4l2_async_notifier *notifier,
- struct fwnode_handle *endpoint, unsigned int asd_struct_size,
- int (*parse_endpoint)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd))
+static int
+v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
+ struct v4l2_fwnode_endpoint vep = { .bus_type = 0 };
struct v4l2_async_subdev *asd;
- struct v4l2_fwnode_endpoint *vep;
- int ret = 0;
+ int ret;
asd = kzalloc(asd_struct_size, GFP_KERNEL);
if (!asd)
@@ -367,32 +614,36 @@ static int v4l2_async_notifier_fwnode_parse_endpoint(
fwnode_graph_get_remote_port_parent(endpoint);
if (!asd->match.fwnode) {
dev_warn(dev, "bad remote port parent\n");
- ret = -EINVAL;
+ ret = -ENOTCONN;
goto out_err;
}
- vep = v4l2_fwnode_endpoint_alloc_parse(endpoint);
- if (IS_ERR(vep)) {
- ret = PTR_ERR(vep);
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &vep);
+ if (ret) {
dev_warn(dev, "unable to parse V4L2 fwnode endpoint (%d)\n",
ret);
goto out_err;
}
- ret = parse_endpoint ? parse_endpoint(dev, vep, asd) : 0;
+ ret = parse_endpoint ? parse_endpoint(dev, &vep, asd) : 0;
if (ret == -ENOTCONN)
- dev_dbg(dev, "ignoring port@%u/endpoint@%u\n", vep->base.port,
- vep->base.id);
+ dev_dbg(dev, "ignoring port@%u/endpoint@%u\n", vep.base.port,
+ vep.base.id);
else if (ret < 0)
dev_warn(dev,
"driver could not parse port@%u/endpoint@%u (%d)\n",
- vep->base.port, vep->base.id, ret);
- v4l2_fwnode_endpoint_free(vep);
+ vep.base.port, vep.base.id, ret);
+ v4l2_fwnode_endpoint_free(&vep);
if (ret < 0)
goto out_err;
- notifier->subdevs[notifier->num_subdevs] = asd;
- notifier->num_subdevs++;
+ ret = v4l2_async_notifier_add_subdev(notifier, asd);
+ if (ret < 0) {
+ /* not an error if asd already exists */
+ if (ret == -EEXIST)
+ ret = 0;
+ goto out_err;
+ }
return 0;
@@ -403,56 +654,21 @@ out_err:
return ret == -ENOTCONN ? 0 : ret;
}
-static int __v4l2_async_notifier_parse_fwnode_endpoints(
- struct device *dev, struct v4l2_async_notifier *notifier,
- size_t asd_struct_size, unsigned int port, bool has_port,
- int (*parse_endpoint)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd))
+static int
+__v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ unsigned int port,
+ bool has_port,
+ parse_endpoint_func parse_endpoint)
{
struct fwnode_handle *fwnode;
- unsigned int max_subdevs = notifier->max_subdevs;
- int ret;
+ int ret = 0;
if (WARN_ON(asd_struct_size < sizeof(struct v4l2_async_subdev)))
return -EINVAL;
- for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
- dev_fwnode(dev), fwnode)); ) {
- struct fwnode_handle *dev_fwnode;
- bool is_available;
-
- dev_fwnode = fwnode_graph_get_port_parent(fwnode);
- is_available = fwnode_device_is_available(dev_fwnode);
- fwnode_handle_put(dev_fwnode);
- if (!is_available)
- continue;
-
- if (has_port) {
- struct fwnode_endpoint ep;
-
- ret = fwnode_graph_parse_endpoint(fwnode, &ep);
- if (ret) {
- fwnode_handle_put(fwnode);
- return ret;
- }
-
- if (ep.port != port)
- continue;
- }
- max_subdevs++;
- }
-
- /* No subdevs to add? Return here. */
- if (max_subdevs == notifier->max_subdevs)
- return 0;
-
- ret = v4l2_async_notifier_realloc(notifier, max_subdevs);
- if (ret)
- return ret;
-
- for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
- dev_fwnode(dev), fwnode)); ) {
+ fwnode_graph_for_each_endpoint(dev_fwnode(dev), fwnode) {
struct fwnode_handle *dev_fwnode;
bool is_available;
@@ -473,13 +689,11 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
continue;
}
- if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
- ret = -EINVAL;
- break;
- }
-
- ret = v4l2_async_notifier_fwnode_parse_endpoint(
- dev, notifier, fwnode, asd_struct_size, parse_endpoint);
+ ret = v4l2_async_notifier_fwnode_parse_endpoint(dev,
+ notifier,
+ fwnode,
+ asd_struct_size,
+ parse_endpoint);
if (ret < 0)
break;
}
@@ -489,27 +703,29 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
return ret;
}
-int v4l2_async_notifier_parse_fwnode_endpoints(
- struct device *dev, struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- int (*parse_endpoint)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd))
+int
+v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
- return __v4l2_async_notifier_parse_fwnode_endpoints(
- dev, notifier, asd_struct_size, 0, false, parse_endpoint);
+ return __v4l2_async_notifier_parse_fwnode_ep(dev, notifier,
+ asd_struct_size, 0,
+ false, parse_endpoint);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints);
-int v4l2_async_notifier_parse_fwnode_endpoints_by_port(
- struct device *dev, struct v4l2_async_notifier *notifier,
- size_t asd_struct_size, unsigned int port,
- int (*parse_endpoint)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd))
+int
+v4l2_async_notifier_parse_fwnode_endpoints_by_port(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ unsigned int port,
+ parse_endpoint_func parse_endpoint)
{
- return __v4l2_async_notifier_parse_fwnode_endpoints(
- dev, notifier, asd_struct_size, port, true, parse_endpoint);
+ return __v4l2_async_notifier_parse_fwnode_ep(dev, notifier,
+ asd_struct_size,
+ port, true,
+ parse_endpoint);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints_by_port);
@@ -524,17 +740,18 @@ EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints_by_port);
* -ENOMEM if memory allocation failed
* -EINVAL if property parsing failed
*/
-static int v4l2_fwnode_reference_parse(
- struct device *dev, struct v4l2_async_notifier *notifier,
- const char *prop)
+static int v4l2_fwnode_reference_parse(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ const char *prop)
{
struct fwnode_reference_args args;
unsigned int index;
int ret;
for (index = 0;
- !(ret = fwnode_property_get_reference_args(
- dev_fwnode(dev), prop, NULL, 0, index, &args));
+ !(ret = fwnode_property_get_reference_args(dev_fwnode(dev),
+ prop, NULL, 0,
+ index, &args));
index++)
fwnode_handle_put(args.fwnode);
@@ -548,31 +765,25 @@ static int v4l2_fwnode_reference_parse(
if (ret != -ENOENT && ret != -ENODATA)
return ret;
- ret = v4l2_async_notifier_realloc(notifier,
- notifier->num_subdevs + index);
- if (ret)
- return ret;
-
- for (index = 0; !fwnode_property_get_reference_args(
- dev_fwnode(dev), prop, NULL, 0, index, &args);
+ for (index = 0;
+ !fwnode_property_get_reference_args(dev_fwnode(dev), prop, NULL,
+ 0, index, &args);
index++) {
struct v4l2_async_subdev *asd;
- if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
- ret = -EINVAL;
- goto error;
- }
+ asd = v4l2_async_notifier_add_fwnode_subdev(notifier,
+ args.fwnode,
+ sizeof(*asd));
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ /* not an error if asd already exists */
+ if (ret == -EEXIST) {
+ fwnode_handle_put(args.fwnode);
+ continue;
+ }
- asd = kzalloc(sizeof(*asd), GFP_KERNEL);
- if (!asd) {
- ret = -ENOMEM;
goto error;
}
-
- notifier->subdevs[notifier->num_subdevs] = asd;
- asd->match.fwnode = args.fwnode;
- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- notifier->num_subdevs++;
}
return 0;
@@ -738,9 +949,12 @@ error:
* -EINVAL if property parsing otherwise failed
* -ENOMEM if memory allocation failed
*/
-static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
- struct fwnode_handle *fwnode, const char *prop, unsigned int index,
- const char * const *props, unsigned int nprops)
+static struct fwnode_handle *
+v4l2_fwnode_reference_get_int_prop(struct fwnode_handle *fwnode,
+ const char *prop,
+ unsigned int index,
+ const char * const *props,
+ unsigned int nprops)
{
struct fwnode_reference_args fwnode_args;
u64 *args = fwnode_args.args;
@@ -792,6 +1006,12 @@ static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
return fwnode;
}
+struct v4l2_fwnode_int_props {
+ const char *name;
+ const char * const *props;
+ unsigned int nprops;
+};
+
/*
* v4l2_fwnode_reference_parse_int_props - parse references for async
* sub-devices
@@ -815,13 +1035,17 @@ static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
 * -EINVAL if property parsing otherwise failed
* -ENOMEM if memory allocation failed
*/
-static int v4l2_fwnode_reference_parse_int_props(
- struct device *dev, struct v4l2_async_notifier *notifier,
- const char *prop, const char * const *props, unsigned int nprops)
+static int
+v4l2_fwnode_reference_parse_int_props(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ const struct v4l2_fwnode_int_props *p)
{
struct fwnode_handle *fwnode;
unsigned int index;
int ret;
+ const char *prop = p->name;
+ const char * const *props = p->props;
+ unsigned int nprops = p->nprops;
index = 0;
do {
@@ -843,31 +1067,26 @@ static int v4l2_fwnode_reference_parse_int_props(
index++;
} while (1);
- ret = v4l2_async_notifier_realloc(notifier,
- notifier->num_subdevs + index);
- if (ret)
- return -ENOMEM;
-
- for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(
- dev_fwnode(dev), prop, index, props,
- nprops))); index++) {
+ for (index = 0;
+ !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev),
+ prop, index,
+ props,
+ nprops)));
+ index++) {
struct v4l2_async_subdev *asd;
- if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
- ret = -EINVAL;
- goto error;
- }
+ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
+ sizeof(*asd));
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ /* not an error if asd already exists */
+ if (ret == -EEXIST) {
+ fwnode_handle_put(fwnode);
+ continue;
+ }
- asd = kzalloc(sizeof(struct v4l2_async_subdev), GFP_KERNEL);
- if (!asd) {
- ret = -ENOMEM;
goto error;
}
-
- notifier->subdevs[notifier->num_subdevs] = asd;
- asd->match.fwnode = fwnode;
- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- notifier->num_subdevs++;
}
return PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
@@ -877,15 +1096,11 @@ error:
return ret;
}
-int v4l2_async_notifier_parse_fwnode_sensor_common(
- struct device *dev, struct v4l2_async_notifier *notifier)
+int v4l2_async_notifier_parse_fwnode_sensor_common(struct device *dev,
+ struct v4l2_async_notifier *notifier)
{
static const char * const led_props[] = { "led" };
- static const struct {
- const char *name;
- const char * const *props;
- unsigned int nprops;
- } props[] = {
+ static const struct v4l2_fwnode_int_props props[] = {
{ "flash-leds", led_props, ARRAY_SIZE(led_props) },
{ "lens-focus", NULL, 0 },
};
@@ -895,12 +1110,12 @@ int v4l2_async_notifier_parse_fwnode_sensor_common(
int ret;
if (props[i].props && is_acpi_node(dev_fwnode(dev)))
- ret = v4l2_fwnode_reference_parse_int_props(
- dev, notifier, props[i].name,
- props[i].props, props[i].nprops);
+ ret = v4l2_fwnode_reference_parse_int_props(dev,
+ notifier,
+ &props[i]);
else
- ret = v4l2_fwnode_reference_parse(
- dev, notifier, props[i].name);
+ ret = v4l2_fwnode_reference_parse(dev, notifier,
+ props[i].name);
if (ret && ret != -ENOENT) {
dev_warn(dev, "parsing property \"%s\" failed (%d)\n",
props[i].name, ret);
@@ -924,6 +1139,8 @@ int v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd)
if (!notifier)
return -ENOMEM;
+ v4l2_async_notifier_init(notifier);
+
ret = v4l2_async_notifier_parse_fwnode_sensor_common(sd->dev,
notifier);
if (ret < 0)
@@ -952,6 +1169,68 @@ out_cleanup:
}
EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor_common);
+int v4l2_async_register_fwnode_subdev(struct v4l2_subdev *sd,
+ size_t asd_struct_size,
+ unsigned int *ports,
+ unsigned int num_ports,
+ parse_endpoint_func parse_endpoint)
+{
+ struct v4l2_async_notifier *notifier;
+ struct device *dev = sd->dev;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ if (WARN_ON(!dev))
+ return -ENODEV;
+
+ fwnode = dev_fwnode(dev);
+ if (!fwnode_device_is_available(fwnode))
+ return -ENODEV;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ v4l2_async_notifier_init(notifier);
+
+ if (!ports) {
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(dev, notifier,
+ asd_struct_size,
+ parse_endpoint);
+ if (ret < 0)
+ goto out_cleanup;
+ } else {
+ unsigned int i;
+
+ for (i = 0; i < num_ports; i++) {
+			ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+				dev, notifier, asd_struct_size, ports[i],
+				parse_endpoint);
+ if (ret < 0)
+ goto out_cleanup;
+ }
+ }
+
+ ret = v4l2_async_subdev_notifier_register(sd, notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto out_unregister;
+
+ sd->subdev_notifier = notifier;
+
+ return 0;
+
+out_unregister:
+ v4l2_async_notifier_unregister(notifier);
+out_cleanup:
+ v4l2_async_notifier_cleanup(notifier);
+ kfree(notifier);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_register_fwnode_subdev);
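
A sketch of how a subdev driver might use the new registration helper (the
my_* names are assumptions): one port of interest, default-sized async
subdevs, and a callback that filters endpoints.

static int my_parse_ep_cb(struct device *dev,
			  struct v4l2_fwnode_endpoint *vep,
			  struct v4l2_async_subdev *asd)
{
	/* -ENOTCONN is not an error, the endpoint is simply skipped */
	return vep->base.port == 0 ? 0 : -ENOTCONN;
}

static int my_register(struct v4l2_subdev *sd)
{
	unsigned int port = 0;

	return v4l2_async_register_fwnode_subdev(sd,
					sizeof(struct v4l2_async_subdev),
					&port, 1, my_parse_ep_cb);
}
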
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 54afc9c7ee6e..c63746968fa3 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -121,7 +121,7 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
vs->id = id;
v4l2_video_std_frame_period(id, &vs->frameperiod);
vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625;
- strlcpy(vs->name, name, sizeof(vs->name));
+ strscpy(vs->name, name, sizeof(vs->name));
return 0;
}
EXPORT_SYMBOL(v4l2_video_std_construct);
@@ -474,13 +474,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
const struct v4l2_plane *plane;
int i;
- pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
p->timestamp.tv_sec / 3600,
(int)(p->timestamp.tv_sec / 60) % 60,
(int)(p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
- prt_names(p->type, v4l2_type_names),
+ prt_names(p->type, v4l2_type_names), p->request_fd,
p->flags, prt_names(p->field, v4l2_field_names),
p->sequence, prt_names(p->memory, v4l2_memory_names));
@@ -590,8 +590,8 @@ static void v4l_print_ext_controls(const void *arg, bool write_only)
const struct v4l2_ext_controls *p = arg;
int i;
- pr_cont("which=0x%x, count=%d, error_idx=%d",
- p->which, p->count, p->error_idx);
+ pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d",
+ p->which, p->count, p->error_idx, p->request_fd);
for (i = 0; i < p->count; i++) {
if (!p->controls[i].size)
pr_cont(", id/val=0x%x/0x%x",
@@ -907,7 +907,7 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
__u32 i;
/* zero the reserved fields */
- c->reserved[0] = c->reserved[1] = 0;
+ c->reserved[0] = 0;
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
@@ -1309,6 +1309,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_H263: descr = "H.263"; break;
case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
+ case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break;
case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 part 2 ES"; break;
case V4L2_PIX_FMT_XVID: descr = "Xvid"; break;
case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
@@ -1336,6 +1337,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
default:
WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
if (fmt->description[0])
@@ -1352,7 +1354,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
}
if (descr)
- WARN_ON(strlcpy(fmt->description, descr, sz) >= sz);
+		WARN_ON(strscpy(fmt->description, descr, sz) < 0);
fmt->flags = flags;
}
@@ -1877,7 +1879,7 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(p, memory);
+ CLEAR_AFTER_FIELD(p, capabilities);
return ops->vidioc_reqbufs(file, fh, p);
}
@@ -1918,7 +1920,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(create, format);
+ CLEAR_AFTER_FIELD(create, capabilities);
v4l_sanitize_format(&create->format);
@@ -2109,9 +2111,9 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
@@ -2128,9 +2130,9 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
@@ -2147,9 +2149,9 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
@@ -2391,7 +2393,7 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (ops->vidioc_g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
- strlcpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
+ strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
if (ops->vidioc_g_chip_info)
return ops->vidioc_g_chip_info(file, fh, arg);
if (p->match.addr)
@@ -2408,7 +2410,7 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (sd->ops->core && sd->ops->core->g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
- strlcpy(p->name, sd->name, sizeof(p->name));
+ strscpy(p->name, sd->name, sizeof(p->name));
return 0;
}
break;
@@ -2780,6 +2782,7 @@ static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
+ struct mutex *req_queue_lock = NULL;
struct mutex *lock; /* ioctl serialization mutex */
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool write_only = false;
@@ -2799,10 +2802,27 @@ static long __video_do_ioctl(struct file *file,
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
vfh = file->private_data;
+ /*
+ * We need to serialize streamon/off with queueing new requests.
+ * These ioctls may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (v4l2_device_supports_requests(vfd->v4l2_dev) &&
+ (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) {
+ req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
+
+ if (mutex_lock_interruptible(req_queue_lock))
+ return -ERESTARTSYS;
+ }
+
lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
- if (lock && mutex_lock_interruptible(lock))
+ if (lock && mutex_lock_interruptible(lock)) {
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
return -ERESTARTSYS;
+ }
if (!video_is_registered(vfd)) {
ret = -ENODEV;
@@ -2861,6 +2881,8 @@ done:
unlock:
if (lock)
mutex_unlock(lock);
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
return ret;
}
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 0fc185a2ce90..014a2a97cadd 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -28,7 +28,7 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
bool is_webcam = false;
u32 flags;
- int ret;
+ int ret, pad_sink, pad_source;
if (!mdev)
return 0;
@@ -63,8 +63,10 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
}
/* It should have at least one I/O entity */
- if (!io_v4l && !io_vbi && !io_swradio)
+ if (!io_v4l && !io_vbi && !io_swradio) {
+ dev_warn(mdev->dev, "Didn't find any I/O entity\n");
return -EINVAL;
+ }
/*
* Here, webcams are modelled on a very simple way: the sensor is
@@ -74,8 +76,10 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
* PC-consumer's hardware.
*/
if (is_webcam) {
- if (!io_v4l)
+ if (!io_v4l) {
+ dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n");
return -EINVAL;
+ }
media_device_for_each_entity(entity, mdev) {
if (entity->function != MEDIA_ENT_F_CAM_SENSOR)
@@ -83,46 +87,91 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
ret = media_create_pad_link(entity, 0,
io_v4l, 0,
MEDIA_LNK_FL_ENABLED);
- if (ret)
+ if (ret) {
+ dev_warn(mdev->dev, "Failed to create a sensor link\n");
return ret;
+ }
}
if (!decoder)
return 0;
}
/* The device isn't a webcam. So, it should have a decoder */
- if (!decoder)
+ if (!decoder) {
+ dev_warn(mdev->dev, "Decoder not found\n");
return -EINVAL;
+ }
/* Link the tuner and IF video output pads */
if (tuner) {
if (if_vid) {
- ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
- if_vid,
- IF_VID_DEC_PAD_IF_INPUT,
+ pad_source = media_get_pad_index(tuner, false,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(if_vid, true,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ if_vid, pad_sink,
MEDIA_LNK_FL_ENABLED);
- if (ret)
+ if (ret) {
+				dev_warn(mdev->dev, "Couldn't create tuner->PLL link\n");
return ret;
- ret = media_create_pad_link(if_vid, IF_VID_DEC_PAD_OUT,
- decoder, DEMOD_PAD_IF_INPUT,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
+ }
+
+ pad_source = media_get_pad_index(if_vid, false,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(decoder, true,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+				dev_warn(mdev->dev, "Couldn't get decoder and/or PLL pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(if_vid, pad_source,
+ decoder, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link PLL to decoder\n");
return ret;
+ }
} else {
- ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
- decoder, DEMOD_PAD_IF_INPUT,
- MEDIA_LNK_FL_ENABLED);
+ pad_source = media_get_pad_index(tuner, false,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(decoder, true,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ decoder, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
}
if (if_aud) {
- ret = media_create_pad_link(tuner, TUNER_PAD_AUD_OUT,
- if_aud,
- IF_AUD_DEC_PAD_IF_INPUT,
+ pad_source = media_get_pad_index(tuner, false,
+ PAD_SIGNAL_AUDIO);
+ pad_sink = media_get_pad_index(if_aud, true,
+ PAD_SIGNAL_AUDIO);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ if_aud, pad_sink,
MEDIA_LNK_FL_ENABLED);
- if (ret)
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n");
return ret;
+ }
} else {
if_aud = tuner;
}
@@ -131,27 +180,48 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
/* Create demod to V4L, VBI and SDR radio links */
if (io_v4l) {
- ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT,
- io_v4l, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
+ pad_source = media_get_pad_index(decoder, false, PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_v4l, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n");
return ret;
+ }
}
if (io_swradio) {
- ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT,
- io_swradio, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
+ pad_source = media_get_pad_index(decoder, false, PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_swradio, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to SDR\n");
return ret;
+ }
}
if (io_vbi) {
- ret = media_create_pad_link(decoder, DEMOD_PAD_VBI_OUT,
+ pad_source = media_get_pad_index(decoder, false, PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
io_vbi, 0,
MEDIA_LNK_FL_ENABLED);
- if (ret)
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to VBI\n");
return ret;
+ }
}
/* Create links for the media connectors */
@@ -161,15 +231,26 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
case MEDIA_ENT_F_CONN_RF:
if (!tuner)
continue;
-
+ pad_sink = media_get_pad_index(tuner, true,
+ PAD_SIGNAL_ANALOG);
+ if (pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n");
+ return -EINVAL;
+ }
ret = media_create_pad_link(entity, 0, tuner,
- TUNER_PAD_RF_INPUT,
+ pad_sink,
flags);
break;
case MEDIA_ENT_F_CONN_SVIDEO:
case MEDIA_ENT_F_CONN_COMPOSITE:
+ pad_sink = media_get_pad_index(decoder, true,
+ PAD_SIGNAL_ANALOG);
+ if (pad_sink < 0) {
+				dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n");
+ return -EINVAL;
+ }
ret = media_create_pad_link(entity, 0, decoder,
- DEMOD_PAD_IF_INPUT,
+ pad_sink,
flags);
break;
default:
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index ce9bd1b91210..d7806db222d8 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -387,7 +387,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
if (m2m_dev->m2m_ops->job_abort)
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
wait_event(m2m_ctx->finished,
!(m2m_ctx->job_flags & TRANS_RUNNING));
} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -473,12 +473,19 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
+ struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_qbuf(vq, buf);
- if (!ret)
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ dprintk("%s: requests cannot be used with capture buffers\n",
+ __func__);
+ return -EPERM;
+ }
+ ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
+ if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
v4l2_m2m_try_schedule(m2m_ctx);
return ret;
@@ -498,15 +505,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
+ struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
- int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_prepare_buf(vq, buf);
- if (!ret)
- v4l2_m2m_try_schedule(m2m_ctx);
-
- return ret;
+ return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
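
The practical effect for userspace is that, on an m2m device, only
OUTPUT-queue buffers may carry a request; a capture-side QBUF with
V4L2_BUF_FLAG_REQUEST_FD now fails with EPERM. A userspace sketch (req_fd
comes from MEDIA_IOC_REQUEST_ALLOC, planes are set up beforehand):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_output_buf_in_request(int video_fd, int req_fd,
				       struct v4l2_plane *planes)
{
	struct v4l2_buffer buf = {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.memory = V4L2_MEMORY_MMAP,
		.index = 0,
		.flags = V4L2_BUF_FLAG_REQUEST_FD,
		.request_fd = req_fd,
		.m.planes = planes,
		.length = 1,
	};

	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}
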
@@ -950,6 +953,52 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+void vb2_m2m_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+ struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ struct v4l2_m2m_ctx *m2m_ctx_obj;
+ struct vb2_buffer *vb;
+
+ if (!obj->ops->queue)
+ continue;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ /* Sanity checks */
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+ m2m_ctx_obj = container_of(vb->vb2_queue,
+ struct v4l2_m2m_ctx,
+ out_q_ctx.q);
+ WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+ m2m_ctx = m2m_ctx_obj;
+ }
+
+ /*
+ * The buffer we queue here can in theory be immediately
+ * unbound, hence the use of list_for_each_entry_safe()
+ * above and why we call the queue op last.
+ */
+ obj->ops->queue(obj);
+ }
+
+ WARN_ON(!m2m_ctx);
+
+ if (m2m_ctx)
+ v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
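
Drivers opt in by pointing their media_device ops at the new helper; a wiring
sketch (vb2_request_validate is the generic request validation helper from
videobuf2, the struct name is assumed):

static const struct media_device_ops my_m2m_media_ops = {
	.req_validate	= vb2_request_validate,
	.req_queue	= vb2_m2m_request_queue,
};

/* assigned to mdev->ops before media_device_register() */
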
+
/* Videobuf2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 2b63fa6b6fc9..f5f0d71ec745 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -222,17 +222,20 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_G_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_S_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_TRY_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
@@ -273,7 +276,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (sd->ops->core && sd->ops->core->g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
- strlcpy(p->name, sd->name, sizeof(p->name));
+ strscpy(p->name, sd->name, sizeof(p->name));
return 0;
}
#endif