Diffstat (limited to 'drivers/gpu/host1x')
-rw-r--r--   drivers/gpu/host1x/bus.c                 |  41
-rw-r--r--   drivers/gpu/host1x/cdma.c                |  11
-rw-r--r--   drivers/gpu/host1x/debug.c               |  14
-rw-r--r--   drivers/gpu/host1x/dev.c                 |   6
-rw-r--r--   drivers/gpu/host1x/dev.h                 |  13
-rw-r--r--   drivers/gpu/host1x/hw/cdma_hw.c          |   2
-rw-r--r--   drivers/gpu/host1x/hw/channel_hw.c       |  10
-rw-r--r--   drivers/gpu/host1x/hw/debug_hw.c         |   2
-rw-r--r--   drivers/gpu/host1x/hw/hw_host1x07_vm.h   |   2
-rw-r--r--   drivers/gpu/host1x/intr.c                |  28
-rw-r--r--   drivers/gpu/host1x/intr.h                |   4
-rw-r--r--   drivers/gpu/host1x/job.c                 |   5
-rw-r--r--   drivers/gpu/host1x/syncpt.c              | 202
-rw-r--r--   drivers/gpu/host1x/syncpt.h              |   4
14 files changed, 224 insertions, 120 deletions
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 347fb962b6c9..46f69c532b6b 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -197,6 +197,17 @@ int host1x_device_init(struct host1x_device *device)
 	mutex_lock(&device->clients_lock);
 
 	list_for_each_entry(client, &device->clients, list) {
+		if (client->ops && client->ops->early_init) {
+			err = client->ops->early_init(client);
+			if (err < 0) {
+				dev_err(&device->dev, "failed to early initialize %s: %d\n",
+					dev_name(client->dev), err);
+				goto teardown_late;
+			}
+		}
+	}
+
+	list_for_each_entry(client, &device->clients, list) {
 		if (client->ops && client->ops->init) {
 			err = client->ops->init(client);
 			if (err < 0) {
@@ -217,6 +228,14 @@ teardown:
 		if (client->ops->exit)
 			client->ops->exit(client);
 
+	/* reset client to end of list for late teardown */
+	client = list_entry(&device->clients, struct host1x_client, list);
+
+teardown_late:
+	list_for_each_entry_continue_reverse(client, &device->clients, list)
+		if (client->ops->late_exit)
+			client->ops->late_exit(client);
+
 	mutex_unlock(&device->clients_lock);
 	return err;
 }
@@ -251,6 +270,18 @@ int host1x_device_exit(struct host1x_device *device)
 		}
 	}
 
+	list_for_each_entry_reverse(client, &device->clients, list) {
+		if (client->ops && client->ops->late_exit) {
+			err = client->ops->late_exit(client);
+			if (err < 0) {
+				dev_err(&device->dev, "failed to late cleanup %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&device->clients_lock);
+				return err;
+			}
+		}
+	}
+
 	mutex_unlock(&device->clients_lock);
 
 	return 0;
@@ -705,8 +736,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);
 
 /**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
  * @client: host1x client
+ * @key: lock class key for the client-specific mutex
  *
 * Registers a host1x client with each host1x controller instance. Note that
 * each client will only match their parent host1x controller and will only be
@@ -715,13 +747,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
 * device and call host1x_device_init(), which will in turn call each client's
 * &host1x_client_ops.init implementation.
 */
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+			     struct lock_class_key *key)
 {
 	struct host1x *host1x;
 	int err;
 
 	INIT_LIST_HEAD(&client->list);
-	mutex_init(&client->lock);
+	__mutex_init(&client->lock, "host1x client lock", key);
 	client->usecount = 0;
 
 	mutex_lock(&devices_lock);
@@ -742,7 +775,7 @@ int host1x_client_register(struct host1x_client *client)
 
 	return 0;
 }
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);
 
 /**
  * host1x_client_unregister() - unregister a host1x client
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index e8d3fda91d8a..6e6ca774f68d 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -273,15 +273,13 @@ static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
 static void cdma_start_timer_locked(struct host1x_cdma *cdma,
 				    struct host1x_job *job)
 {
-	struct host1x *host = cdma_to_host1x(cdma);
-
 	if (cdma->timeout.client) {
 		/* timer already started */
 		return;
 	}
 
 	cdma->timeout.client = job->client;
-	cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
+	cdma->timeout.syncpt = job->syncpt;
 	cdma->timeout.syncpt_val = job->syncpt_end;
 	cdma->timeout.start_ktime = ktime_get();
 
@@ -312,7 +310,6 @@ static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
 static void update_cdma_locked(struct host1x_cdma *cdma)
 {
 	bool signal = false;
-	struct host1x *host1x = cdma_to_host1x(cdma);
 	struct host1x_job *job, *n;
 
 	/* If CDMA is stopped, queue is cleared and we can return */
@@ -324,8 +321,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
 	 * to consume as many sync queue entries as possible without blocking
 	 */
 	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
-		struct host1x_syncpt *sp =
-			host1x_syncpt_get(host1x, job->syncpt_id);
+		struct host1x_syncpt *sp = job->syncpt;
 
 		/* Check whether this syncpt has completed, and bail if not */
 		if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
@@ -499,8 +495,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
 		if (!cdma->timeout.initialized) {
 			int err;
 
-			err = host1x_hw_cdma_timeout_init(host1x, cdma,
-							  job->syncpt_id);
+			err = host1x_hw_cdma_timeout_init(host1x, cdma);
 			if (err) {
 				mutex_unlock(&cdma->lock);
 				return err;
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 1b4997bda1c7..8a14880c61bb 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -69,6 +69,7 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
 
 static void show_syncpts(struct host1x *m, struct output *o)
 {
+	struct list_head *pos;
 	unsigned int i;
 
 	host1x_debug_output(o, "---- syncpts ----\n");
@@ -76,12 +77,19 @@ static void show_syncpts(struct host1x *m, struct output *o)
 	for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
 		u32 max = host1x_syncpt_read_max(m->syncpt + i);
 		u32 min = host1x_syncpt_load(m->syncpt + i);
+		unsigned int waiters = 0;
 
-		if (!min && !max)
+		spin_lock(&m->syncpt[i].intr.lock);
+		list_for_each(pos, &m->syncpt[i].intr.wait_head)
+			waiters++;
+		spin_unlock(&m->syncpt[i].intr.lock);
+
+		if (!min && !max && !waiters)
 			continue;
 
-		host1x_debug_output(o, "id %u (%s) min %d max %d\n",
-				    i, m->syncpt[i].name, min, max);
+		host1x_debug_output(o,
+				    "id %u (%s) min %d max %d (%d waiters)\n",
+				    i, m->syncpt[i].name, min, max, waiters);
 	}
 
 	for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index d0ebb70e2fdd..fbb6447b8659 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -77,6 +77,7 @@ static const struct host1x_info host1x01_info = {
 	.has_hypervisor = false,
 	.num_sid_entries = 0,
 	.sid_table = NULL,
+	.reserve_vblank_syncpts = true,
 };
 
 static const struct host1x_info host1x02_info = {
@@ -91,6 +92,7 @@ static const struct host1x_info host1x02_info = {
 	.has_hypervisor = false,
 	.num_sid_entries = 0,
 	.sid_table = NULL,
+	.reserve_vblank_syncpts = true,
 };
 
 static const struct host1x_info host1x04_info = {
@@ -105,6 +107,7 @@ static const struct host1x_info host1x04_info = {
 	.has_hypervisor = false,
 	.num_sid_entries = 0,
 	.sid_table = NULL,
+	.reserve_vblank_syncpts = false,
 };
 
 static const struct host1x_info host1x05_info = {
@@ -119,6 +122,7 @@ static const struct host1x_info host1x05_info = {
 	.has_hypervisor = false,
 	.num_sid_entries = 0,
 	.sid_table = NULL,
+	.reserve_vblank_syncpts = false,
 };
 
 static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -142,6 +146,7 @@ static const struct host1x_info host1x06_info = {
 	.has_hypervisor = true,
 	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
 	.sid_table = tegra186_sid_table,
+	.reserve_vblank_syncpts = false,
 };
 
 static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -165,6 +170,7 @@ static const struct host1x_info host1x07_info = {
 	.has_hypervisor = true,
 	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
 	.sid_table = tegra194_sid_table,
+	.reserve_vblank_syncpts = false,
 };
 
 static const struct of_device_id host1x_of_match[] = {
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index f781a9b0f39d..fa6d4bc46e98 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -37,7 +37,7 @@ struct host1x_cdma_ops {
 	void (*start)(struct host1x_cdma *cdma);
 	void (*stop)(struct host1x_cdma *cdma);
 	void (*flush)(struct  host1x_cdma *cdma);
-	int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
+	int (*timeout_init)(struct host1x_cdma *cdma);
 	void (*timeout_destroy)(struct host1x_cdma *cdma);
 	void (*freeze)(struct host1x_cdma *cdma);
 	void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -101,6 +101,12 @@ struct host1x_info {
 	bool has_hypervisor; /* has hypervisor registers */
 	unsigned int num_sid_entries;
 	const struct host1x_sid_entry *sid_table;
+	/*
+	 * On T20-T148, the boot chain may setup DC to increment syncpoints
+	 * 26/27 on VBLANK. As such we cannot use these syncpoints until
+	 * the display driver disables VBLANK increments.
+	 */
+	bool reserve_vblank_syncpts;
 };
 
 struct host1x {
@@ -261,10 +267,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
 }
 
 static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
-					      struct host1x_cdma *cdma,
-					      unsigned int syncpt)
+					      struct host1x_cdma *cdma)
 {
-	return host->cdma_op->timeout_init(cdma, syncpt);
+	return host->cdma_op->timeout_init(cdma);
 }
 
 static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2f3bf94cf365..e49cd5b8f735 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -295,7 +295,7 @@ static void cdma_timeout_handler(struct work_struct *work)
 /*
  * Init timeout resources
  */
-static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
+static int cdma_timeout_init(struct host1x_cdma *cdma)
 {
 	INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
 	cdma->timeout.initialized = true;
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 5eaa29d171c9..d4c28faf27d1 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -86,8 +86,7 @@ static void submit_gathers(struct host1x_job *job)
 
 static inline void synchronize_syncpt_base(struct host1x_job *job)
 {
-	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
-	struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+	struct host1x_syncpt *sp = job->syncpt;
 	unsigned int id;
 	u32 value;
 
@@ -118,7 +117,7 @@ static void host1x_channel_set_streamid(struct host1x_channel *channel)
 static int channel_submit(struct host1x_job *job)
 {
 	struct host1x_channel *ch = job->channel;
-	struct host1x_syncpt *sp;
+	struct host1x_syncpt *sp = job->syncpt;
 	u32 user_syncpt_incrs = job->syncpt_incrs;
 	u32 prev_max = 0;
 	u32 syncval;
@@ -126,10 +125,9 @@ static int channel_submit(struct host1x_job *job)
 	struct host1x_waitlist *completed_waiter = NULL;
 	struct host1x *host = dev_get_drvdata(ch->dev->parent);
 
-	sp = host->syncpt + job->syncpt_id;
 	trace_host1x_channel_submit(dev_name(ch->dev),
 				    job->num_gathers, job->num_relocs,
-				    job->syncpt_id, job->syncpt_incrs);
+				    job->syncpt->id, job->syncpt_incrs);
 
 	/* before error checks, return current max */
 	prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
@@ -163,7 +161,7 @@ static int channel_submit(struct host1x_job *job)
 		host1x_cdma_push(&ch->cdma,
 				 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
 					host1x_uclass_wait_syncpt_r(), 1),
-				 host1x_class_host_wait_syncpt(job->syncpt_id,
+				 host1x_class_host_wait_syncpt(job->syncpt->id,
 					host1x_syncpt_read_max(sp)));
 	}
 
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index f31bcfa1b837..ceb48229d14b 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -204,7 +204,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
 		unsigned int i;
 
 		host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
-				    job, job->syncpt_id, job->syncpt_end,
+				    job, job->syncpt->id, job->syncpt_end,
 				    job->first_get, job->timeout,
 				    job->num_slots, job->num_unpins);
 
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_vm.h b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
index 3058b3c9a91d..b766851d5b83 100644
--- a/drivers/gpu/host1x/hw/hw_host1x07_vm.h
+++ b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
@@ -29,6 +29,6 @@
 #define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x)	(0x652c + 4 * (x))
 #define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x)	(0x6590 + 4 * (x))
 #define HOST1X_SYNC_SYNCPT(x)				(0x8080 + 4 * (x))
-#define HOST1X_SYNC_SYNCPT_INT_THRESH(x)		(0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x)		(0x9980 + 4 * (x))
 #define HOST1X_SYNC_SYNCPT_CH_APP(x)			(0xa604 + 4 * (x))
 #define HOST1X_SYNC_SYNCPT_CH_APP_CH(v)			(((v) & 0x3f) << 8)
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 9245add23b5d..6d1f3c0fdbe7 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -235,25 +235,37 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
 			host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
 	}
 
-	spin_unlock(&syncpt->intr.lock);
-
 	if (ref)
 		*ref = waiter;
+
+	spin_unlock(&syncpt->intr.lock);
+
 	return 0;
 }
 
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+			 bool flush)
 {
 	struct host1x_waitlist *waiter = ref;
 	struct host1x_syncpt *syncpt;
 
-	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
-	       WLS_REMOVED)
-		schedule();
+	atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);
 
 	syncpt = host->syncpt + id;
-	(void)process_wait_list(host, syncpt,
-				host1x_syncpt_load(host->syncpt + id));
+
+	spin_lock(&syncpt->intr.lock);
+	if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
+	    WLS_CANCELLED) {
+		list_del(&waiter->list);
+		kref_put(&waiter->refcount, waiter_release);
+	}
+	spin_unlock(&syncpt->intr.lock);
+
+	if (flush) {
+		/* Wait until any concurrently executing handler has finished. */
+		while (atomic_read(&waiter->state) != WLS_HANDLED)
+			schedule();
+	}
 
 	kref_put(&waiter->refcount, waiter_release);
 }
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index aac38194398f..6ea55e615e3a 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -74,8 +74,10 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
 * Unreference an action submitted to host1x_intr_add_action().
 * You must call this if you passed non-NULL as ref.
 * @ref the ref returned from host1x_intr_add_action()
+ * @flush wait until any pending handlers have completed before returning.
 */
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+			 bool flush);
 
 /* Initialize host1x sync point interrupt */
 int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 82d0a60ba3f7..adbdc225de8d 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -79,6 +79,9 @@ static void job_free(struct kref *ref)
 {
 	struct host1x_job *job = container_of(ref, struct host1x_job, ref);
 
+	if (job->syncpt)
+		host1x_syncpt_put(job->syncpt);
+
 	kfree(job);
 }
 
@@ -674,7 +677,7 @@ EXPORT_SYMBOL(host1x_job_unpin);
 */
 void host1x_job_dump(struct device *dev, struct host1x_job *job)
 {
-	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
+	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
 	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
 	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
 	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index fce7892d5137..e648ebbb2027 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -42,17 +42,32 @@ static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
 		base->requested = false;
 }
 
-static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
-						 struct host1x_client *client,
-						 unsigned long flags)
+/**
+ * host1x_syncpt_alloc() - allocate a syncpoint
+ * @host: host1x device data
+ * @flags: bitfield of HOST1X_SYNCPT_* flags
+ * @name: name for the syncpoint for use in debug prints
+ *
+ * Allocates a hardware syncpoint for the caller's use. The caller then has
+ * the sole authority to mutate the syncpoint's value until it is freed again.
+ *
+ * If no free syncpoints are available, or a NULL name was specified, returns
+ * NULL.
+ */
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+					  unsigned long flags,
+					  const char *name)
 {
 	struct host1x_syncpt *sp = host->syncpt;
+	char *full_name;
 	unsigned int i;
-	char *name;
+
+	if (!name)
+		return NULL;
 
 	mutex_lock(&host->syncpt_mutex);
 
-	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
+	for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
 		;
 
 	if (i >= host->info->nb_pts)
@@ -64,19 +79,19 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
 			goto unlock;
 	}
 
-	name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
-			 client ? dev_name(client->dev) : NULL);
-	if (!name)
+	full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
+	if (!full_name)
 		goto free_base;
 
-	sp->client = client;
-	sp->name = name;
+	sp->name = full_name;
 
 	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
 		sp->client_managed = true;
 	else
 		sp->client_managed = false;
 
+	kref_init(&sp->ref);
+
 	mutex_unlock(&host->syncpt_mutex);
 	return sp;
 
@@ -87,6 +102,7 @@ unlock:
 	mutex_unlock(&host->syncpt_mutex);
 	return NULL;
 }
+EXPORT_SYMBOL(host1x_syncpt_alloc);
 
 /**
  * host1x_syncpt_id() - retrieve syncpoint ID
@@ -294,7 +310,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
 		}
 	}
 
-	host1x_intr_put_ref(sp->host, sp->id, ref);
+	host1x_intr_put_ref(sp->host, sp->id, ref, true);
 
 done:
 	return err;
@@ -307,59 +323,12 @@ EXPORT_SYMBOL(host1x_syncpt_wait);
 bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
 {
 	u32 current_val;
-	u32 future_val;
 
 	smp_rmb();
 
 	current_val = (u32)atomic_read(&sp->min_val);
-	future_val = (u32)atomic_read(&sp->max_val);
-
-	/* Note the use of unsigned arithmetic here (mod 1<<32).
-	 *
-	 * c = current_val = min_val	= the current value of the syncpoint.
-	 * t = thresh			= the value we are checking
-	 * f = future_val  = max_val	= the value c will reach when all
-	 *				  outstanding increments have completed.
-	 *
-	 * Note that c always chases f until it reaches f.
-	 *
-	 * Dtf = (f - t)
-	 * Dtc = (c - t)
-	 *
-	 *  Consider all cases:
-	 *
-	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
-	 *	B) .....c.....f..t..	Dtf > Dtc	expired
-	 *	C) ..t..c.....f.....	Dtf > Dtc	expired	   (Dct very large)
-	 *
-	 *  Any case where f==c: always expired (for any t).	Dtf == Dcf
-	 *  Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
-	 *  Any case where t==f!=c: always wait.		Dtf <  Dtc (because Dtf==0,
-	 *							Dtc!=0)
-	 *
-	 *  Other cases:
-	 *
-	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
-	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
-	 *	A) .....f..t..c.....	Dtf > Dtc	expired
-	 *
-	 *   So:
-	 *	   Dtf >= Dtc implies EXPIRED	(return true)
-	 *	   Dtf <  Dtc implies WAIT	(return false)
-	 *
-	 * Note: If t is expired then we *cannot* wait on it. We would wait
-	 * forever (hang the system).
-	 *
-	 * Note: do NOT get clever and remove the -thresh from both sides. It
-	 * is NOT the same.
-	 *
-	 * If future valueis zero, we have a client managed sync point. In that
-	 * case we do a direct comparison.
-	 */
-	if (!host1x_syncpt_client_managed(sp))
-		return future_val - thresh >= current_val - thresh;
-	else
-		return (s32)(current_val - thresh) >= 0;
+
+	return ((current_val - thresh) & 0x80000000U) == 0U;
 }
 
 int host1x_syncpt_init(struct host1x *host)
@@ -401,10 +370,15 @@ int host1x_syncpt_init(struct host1x *host)
 	host1x_hw_syncpt_enable_protection(host);
 
 	/* Allocate sync point to use for clearing waits for expired fences */
-	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
+	host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
 	if (!host->nop_sp)
 		return -ENOMEM;
 
+	if (host->info->reserve_vblank_syncpts) {
+		kref_init(&host->syncpt[26].ref);
+		kref_init(&host->syncpt[27].ref);
+	}
+
 	return 0;
 }
 
@@ -416,44 +390,50 @@ int host1x_syncpt_init(struct host1x *host)
 * host1x client drivers can use this function to allocate a syncpoint for
 * subsequent use. A syncpoint returned by this function will be reserved for
 * use by the client exclusively. When no longer using a syncpoint, a host1x
- * client driver needs to release it using host1x_syncpt_free().
+ * client driver needs to release it using host1x_syncpt_put().
 */
 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
 					    unsigned long flags)
 {
 	struct host1x *host = dev_get_drvdata(client->host->parent);
 
-	return host1x_syncpt_alloc(host, client, flags);
+	return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
 }
 EXPORT_SYMBOL(host1x_syncpt_request);
 
-/**
- * host1x_syncpt_free() - free a requested syncpoint
- * @sp: host1x syncpoint
- *
- * Release a syncpoint previously allocated using host1x_syncpt_request(). A
- * host1x client driver should call this when the syncpoint is no longer in
- * use. Note that client drivers must ensure that the syncpoint doesn't remain
- * under the control of hardware after calling this function, otherwise two
- * clients may end up trying to access the same syncpoint concurrently.
- */
-void host1x_syncpt_free(struct host1x_syncpt *sp)
+static void syncpt_release(struct kref *ref)
 {
-	if (!sp)
-		return;
+	struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);
+
+	atomic_set(&sp->max_val, host1x_syncpt_read(sp));
 
 	mutex_lock(&sp->host->syncpt_mutex);
 
 	host1x_syncpt_base_free(sp->base);
 	kfree(sp->name);
 	sp->base = NULL;
-	sp->client = NULL;
 	sp->name = NULL;
 	sp->client_managed = false;
 
 	mutex_unlock(&sp->host->syncpt_mutex);
 }
-EXPORT_SYMBOL(host1x_syncpt_free);
+
+/**
+ * host1x_syncpt_put() - free a requested syncpoint
+ * @sp: host1x syncpoint
+ *
+ * Release a syncpoint previously allocated using host1x_syncpt_request(). A
+ * host1x client driver should call this when the syncpoint is no longer in
+ * use.
+ */
+void host1x_syncpt_put(struct host1x_syncpt *sp)
+{
+	if (!sp)
+		return;
+
+	kref_put(&sp->ref, syncpt_release);
+}
+EXPORT_SYMBOL(host1x_syncpt_put);
 
 void host1x_syncpt_deinit(struct host1x *host)
 {
@@ -520,16 +500,48 @@ unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
 }
 
 /**
- * host1x_syncpt_get() - obtain a syncpoint by ID
+ * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
 * @host: host1x controller
 * @id: syncpoint ID
 */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
+					      unsigned int id)
 {
 	if (id >= host->info->nb_pts)
 		return NULL;
 
-	return host->syncpt + id;
+	if (kref_get_unless_zero(&host->syncpt[id].ref))
+		return &host->syncpt[id];
+	else
+		return NULL;
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id);
+
+/**
+ * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
+ * 	increase the refcount.
+ * @host: host1x controller
+ * @id: syncpoint ID
+ */
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
+						    unsigned int id)
+{
+	if (id >= host->info->nb_pts)
+		return NULL;
+
+	return &host->syncpt[id];
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);
+
+/**
+ * host1x_syncpt_get() - increment syncpoint refcount
+ * @sp: syncpoint
+ */
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
+{
+	kref_get(&sp->ref);
+
+	return sp;
 }
 EXPORT_SYMBOL(host1x_syncpt_get);
 
@@ -552,3 +564,31 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
 	return base->id;
 }
 EXPORT_SYMBOL(host1x_syncpt_base_id);
+
+static void do_nothing(struct kref *ref)
+{
+}
+
+/**
+ * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
+ *   available for allocation
+ *
+ * @client: host1x bus client
+ * @syncpt_id: syncpoint ID to make available
+ *
+ * Makes VBLANK<i> syncpoint available for allocatation if it was
+ * reserved at initialization time. This should be called by the display
+ * driver after it has ensured that any VBLANK increment programming configured
+ * by the boot chain has been disabled.
+ */
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+					      u32 syncpt_id)
+{
+	struct host1x *host = dev_get_drvdata(client->host->parent);
+
+	if (!host->info->reserve_vblank_syncpts)
+		return;
+
+	kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
+}
+EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 8e1d04dacaa0..a6766f8d55ee 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -11,6 +11,7 @@
 #include <linux/atomic.h>
 #include <linux/host1x.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/sched.h>
 
 #include "intr.h"
@@ -26,6 +27,8 @@ struct host1x_syncpt_base {
 };
 
 struct host1x_syncpt {
+	struct kref ref;
+
 	unsigned int id;
 	atomic_t min_val;
 	atomic_t max_val;
@@ -33,7 +36,6 @@ struct host1x_syncpt {
 	const char *name;
 	bool client_managed;
 	struct host1x *host;
-	struct host1x_client *client;
 	struct host1x_syncpt_base *base;
 
 	/* interrupt data */
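
Note: as a usage illustration of the refcounted syncpoint API introduced in syncpt.c above (host1x_syncpt_request(), host1x_syncpt_get(), host1x_syncpt_put()), here is a minimal sketch of how a client driver might manage a syncpoint's lifetime. The example_* functions and the stored pointer are hypothetical and not part of this commit; the prototypes are assumed to be exposed through <linux/host1x.h>.

/*
 * Hypothetical sketch, not part of this commit: a client driver holding
 * and dropping references on a syncpoint with the reworked API.
 */
#include <linux/errno.h>
#include <linux/host1x.h>

static struct host1x_syncpt *example_syncpt;

static int example_client_init(struct host1x_client *client)
{
	/* Reserves a free hardware syncpoint, named after the client device. */
	example_syncpt = host1x_syncpt_request(client, 0);
	if (!example_syncpt)
		return -ENOMEM;

	/* A second user of the same syncpoint takes its own reference... */
	host1x_syncpt_get(example_syncpt);

	/* ...and drops it when done; the syncpoint stays allocated. */
	host1x_syncpt_put(example_syncpt);

	return 0;
}

static int example_client_exit(struct host1x_client *client)
{
	/* The final put returns the hardware syncpoint to the free pool. */
	host1x_syncpt_put(example_syncpt);
	example_syncpt = NULL;

	return 0;
}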
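
Note: the rewritten host1x_syncpt_is_expired() above collapses the old future_val reasoning into a single wraparound-safe test: a threshold counts as expired once it trails the current value by less than 2^31, modulo 2^32. A standalone illustration of that predicate (plain userspace C, not kernel code; the helper name is made up):

/* Standalone illustration of the wraparound-safe expiration check. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool syncpt_expired(uint32_t current_val, uint32_t thresh)
{
	/* Expired when thresh is at most 2^31 - 1 increments behind current_val. */
	return ((current_val - thresh) & 0x80000000u) == 0u;
}

int main(void)
{
	assert(syncpt_expired(100, 100));        /* equal: expired */
	assert(syncpt_expired(101, 100));        /* just passed: expired */
	assert(!syncpt_expired(100, 101));       /* threshold still ahead: wait */
	assert(syncpt_expired(5, 0xfffffffbu));  /* counter wrapped: expired */
	return 0;
}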
