Diffstat (limited to 'drivers/dma/idxd/sysfs.c')
-rw-r--r--  drivers/dma/idxd/sysfs.c | 795
1 file changed, 350 insertions(+), 445 deletions(-)
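
Most of the 445 removed lines come from dropping the per-type bus/driver boilerplate and the no-op idxd_conf_device_release(); the replacement pattern, visible in the new idxd_conf_engine_release()/idxd_conf_group_release()/idxd_conf_wq_release() callbacks below, embeds each sysfs object in a dynamically allocated container and lets a struct device_type release callback free it, with the attribute groups attached through .groups. A minimal sketch of that pattern, with illustrative names (foo_device, foo_release, foo_device_type, foo_alloc are not from the driver), might look like:

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical container: the sysfs device is embedded in the object. */
struct foo_device {
	struct device conf_dev;
	int id;
};

/*
 * The release callback runs when the last reference to conf_dev is dropped;
 * freeing the container here (instead of a no-op release) means the object
 * cannot go away while sysfs still holds a reference to it.
 */
static void foo_release(struct device *dev)
{
	struct foo_device *foo = container_of(dev, struct foo_device, conf_dev);

	kfree(foo);
}

static struct device_type foo_device_type = {
	.name	 = "foo",
	.release = foo_release,
	/* .groups = foo_attribute_groups;  attribute groups ride along here */
};

/*
 * Allocation and initialization are split from registration: after
 * device_initialize(), a failed device_add() can be unwound with a plain
 * put_device(), which ends up in foo_release().
 */
static struct foo_device *foo_alloc(struct device *parent)
{
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	device_initialize(&foo->conf_dev);
	foo->conf_dev.parent = parent;
	foo->conf_dev.type = &foo_device_type;
	return foo;
}
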
| diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 4dbb03c545e4..0460d58e3941 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {  	[IDXD_WQT_USER]		= "user",  }; -static void idxd_conf_device_release(struct device *dev) -{ -	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev)); -} - -static struct device_type idxd_group_device_type = { -	.name = "group", -	.release = idxd_conf_device_release, -}; - -static struct device_type idxd_wq_device_type = { -	.name = "wq", -	.release = idxd_conf_device_release, -}; - -static struct device_type idxd_engine_device_type = { -	.name = "engine", -	.release = idxd_conf_device_release, -}; - -static struct device_type dsa_device_type = { -	.name = "dsa", -	.release = idxd_conf_device_release, -}; - -static struct device_type iax_device_type = { -	.name = "iax", -	.release = idxd_conf_device_release, -}; - -static inline bool is_dsa_dev(struct device *dev) -{ -	return dev ? dev->type == &dsa_device_type : false; -} - -static inline bool is_iax_dev(struct device *dev) -{ -	return dev ? dev->type == &iax_device_type : false; -} - -static inline bool is_idxd_dev(struct device *dev) -{ -	return is_dsa_dev(dev) || is_iax_dev(dev); -} - -static inline bool is_idxd_wq_dev(struct device *dev) -{ -	return dev ? dev->type == &idxd_wq_device_type : false; -} - -static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) -{ -	if (wq->type == IDXD_WQT_KERNEL && -	    strcmp(wq->name, "dmaengine") == 0) -		return true; -	return false; -} - -static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) -{ -	return wq->type == IDXD_WQT_USER; -} -  static int idxd_config_bus_match(struct device *dev,  				 struct device_driver *drv)  { @@ -110,9 +47,131 @@ static int idxd_config_bus_match(struct device *dev,  	return matched;  } -static int idxd_config_bus_probe(struct device *dev) +static int enable_wq(struct idxd_wq *wq)  { +	struct idxd_device *idxd = wq->idxd; +	struct device *dev = &idxd->pdev->dev; +	unsigned long flags;  	int rc; + +	mutex_lock(&wq->wq_lock); + +	if (idxd->state != IDXD_DEV_ENABLED) { +		mutex_unlock(&wq->wq_lock); +		dev_warn(dev, "Enabling while device not enabled.\n"); +		return -EPERM; +	} + +	if (wq->state != IDXD_WQ_DISABLED) { +		mutex_unlock(&wq->wq_lock); +		dev_warn(dev, "WQ %d already enabled.\n", wq->id); +		return -EBUSY; +	} + +	if (!wq->group) { +		mutex_unlock(&wq->wq_lock); +		dev_warn(dev, "WQ not attached to group.\n"); +		return -EINVAL; +	} + +	if (strlen(wq->name) == 0) { +		mutex_unlock(&wq->wq_lock); +		dev_warn(dev, "WQ name not set.\n"); +		return -EINVAL; +	} + +	/* Shared WQ checks */ +	if (wq_shared(wq)) { +		if (!device_swq_supported(idxd)) { +			dev_warn(dev, "PASID not enabled and shared WQ.\n"); +			mutex_unlock(&wq->wq_lock); +			return -ENXIO; +		} +		/* +		 * Shared wq with the threshold set to 0 means the user +		 * did not set the threshold or transitioned from a +		 * dedicated wq but did not set threshold. A value +		 * of 0 would effectively disable the shared wq. The +		 * driver does not allow a value of 0 to be set for +		 * threshold via sysfs. 
+		 */ +		if (wq->threshold == 0) { +			dev_warn(dev, "Shared WQ and threshold 0.\n"); +			mutex_unlock(&wq->wq_lock); +			return -EINVAL; +		} +	} + +	rc = idxd_wq_alloc_resources(wq); +	if (rc < 0) { +		mutex_unlock(&wq->wq_lock); +		dev_warn(dev, "WQ resource alloc failed\n"); +		return rc; +	} + +	spin_lock_irqsave(&idxd->dev_lock, flags); +	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) +		rc = idxd_device_config(idxd); +	spin_unlock_irqrestore(&idxd->dev_lock, flags); +	if (rc < 0) { +		mutex_unlock(&wq->wq_lock); +		dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc); +		return rc; +	} + +	rc = idxd_wq_enable(wq); +	if (rc < 0) { +		mutex_unlock(&wq->wq_lock); +		dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc); +		return rc; +	} + +	rc = idxd_wq_map_portal(wq); +	if (rc < 0) { +		dev_warn(dev, "wq portal mapping failed: %d\n", rc); +		rc = idxd_wq_disable(wq); +		if (rc < 0) +			dev_warn(dev, "IDXD wq disable failed\n"); +		mutex_unlock(&wq->wq_lock); +		return rc; +	} + +	wq->client_count = 0; + +	if (wq->type == IDXD_WQT_KERNEL) { +		rc = idxd_wq_init_percpu_ref(wq); +		if (rc < 0) { +			dev_dbg(dev, "percpu_ref setup failed\n"); +			mutex_unlock(&wq->wq_lock); +			return rc; +		} +	} + +	if (is_idxd_wq_dmaengine(wq)) { +		rc = idxd_register_dma_channel(wq); +		if (rc < 0) { +			dev_dbg(dev, "DMA channel register failed\n"); +			mutex_unlock(&wq->wq_lock); +			return rc; +		} +	} else if (is_idxd_wq_cdev(wq)) { +		rc = idxd_wq_add_cdev(wq); +		if (rc < 0) { +			dev_dbg(dev, "Cdev creation failed\n"); +			mutex_unlock(&wq->wq_lock); +			return rc; +		} +	} + +	mutex_unlock(&wq->wq_lock); +	dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev)); + +	return 0; +} + +static int idxd_config_bus_probe(struct device *dev) +{ +	int rc = 0;  	unsigned long flags;  	dev_dbg(dev, "%s called\n", __func__); @@ -130,7 +189,8 @@ static int idxd_config_bus_probe(struct device *dev)  		/* Perform IDXD configuration and enabling */  		spin_lock_irqsave(&idxd->dev_lock, flags); -		rc = idxd_device_config(idxd); +		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) +			rc = idxd_device_config(idxd);  		spin_unlock_irqrestore(&idxd->dev_lock, flags);  		if (rc < 0) {  			module_put(THIS_MODULE); @@ -157,115 +217,8 @@ static int idxd_config_bus_probe(struct device *dev)  		return 0;  	} else if (is_idxd_wq_dev(dev)) {  		struct idxd_wq *wq = confdev_to_wq(dev); -		struct idxd_device *idxd = wq->idxd; - -		mutex_lock(&wq->wq_lock); - -		if (idxd->state != IDXD_DEV_ENABLED) { -			mutex_unlock(&wq->wq_lock); -			dev_warn(dev, "Enabling while device not enabled.\n"); -			return -EPERM; -		} - -		if (wq->state != IDXD_WQ_DISABLED) { -			mutex_unlock(&wq->wq_lock); -			dev_warn(dev, "WQ %d already enabled.\n", wq->id); -			return -EBUSY; -		} - -		if (!wq->group) { -			mutex_unlock(&wq->wq_lock); -			dev_warn(dev, "WQ not attached to group.\n"); -			return -EINVAL; -		} - -		if (strlen(wq->name) == 0) { -			mutex_unlock(&wq->wq_lock); -			dev_warn(dev, "WQ name not set.\n"); -			return -EINVAL; -		} - -		/* Shared WQ checks */ -		if (wq_shared(wq)) { -			if (!device_swq_supported(idxd)) { -				dev_warn(dev, -					 "PASID not enabled and shared WQ.\n"); -				mutex_unlock(&wq->wq_lock); -				return -ENXIO; -			} -			/* -			 * Shared wq with the threshold set to 0 means the user -			 * did not set the threshold or transitioned from a -			 * dedicated wq but did not set threshold. A value -			 * of 0 would effectively disable the shared wq. 
The -			 * driver does not allow a value of 0 to be set for -			 * threshold via sysfs. -			 */ -			if (wq->threshold == 0) { -				dev_warn(dev, -					 "Shared WQ and threshold 0.\n"); -				mutex_unlock(&wq->wq_lock); -				return -EINVAL; -			} -		} - -		rc = idxd_wq_alloc_resources(wq); -		if (rc < 0) { -			mutex_unlock(&wq->wq_lock); -			dev_warn(dev, "WQ resource alloc failed\n"); -			return rc; -		} - -		spin_lock_irqsave(&idxd->dev_lock, flags); -		rc = idxd_device_config(idxd); -		spin_unlock_irqrestore(&idxd->dev_lock, flags); -		if (rc < 0) { -			mutex_unlock(&wq->wq_lock); -			dev_warn(dev, "Writing WQ %d config failed: %d\n", -				 wq->id, rc); -			return rc; -		} - -		rc = idxd_wq_enable(wq); -		if (rc < 0) { -			mutex_unlock(&wq->wq_lock); -			dev_warn(dev, "WQ %d enabling failed: %d\n", -				 wq->id, rc); -			return rc; -		} - -		rc = idxd_wq_map_portal(wq); -		if (rc < 0) { -			dev_warn(dev, "wq portal mapping failed: %d\n", rc); -			rc = idxd_wq_disable(wq); -			if (rc < 0) -				dev_warn(dev, "IDXD wq disable failed\n"); -			mutex_unlock(&wq->wq_lock); -			return rc; -		} - -		wq->client_count = 0; -		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev)); - -		if (is_idxd_wq_dmaengine(wq)) { -			rc = idxd_register_dma_channel(wq); -			if (rc < 0) { -				dev_dbg(dev, "DMA channel register failed\n"); -				mutex_unlock(&wq->wq_lock); -				return rc; -			} -		} else if (is_idxd_wq_cdev(wq)) { -			rc = idxd_wq_add_cdev(wq); -			if (rc < 0) { -				dev_dbg(dev, "Cdev creation failed\n"); -				mutex_unlock(&wq->wq_lock); -				return rc; -			} -		} - -		mutex_unlock(&wq->wq_lock); -		return 0; +		return enable_wq(wq);  	}  	return -ENODEV; @@ -275,7 +228,6 @@ static void disable_wq(struct idxd_wq *wq)  {  	struct idxd_device *idxd = wq->idxd;  	struct device *dev = &idxd->pdev->dev; -	int rc;  	mutex_lock(&wq->wq_lock);  	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev)); @@ -284,6 +236,9 @@ static void disable_wq(struct idxd_wq *wq)  		return;  	} +	if (wq->type == IDXD_WQT_KERNEL) +		idxd_wq_quiesce(wq); +  	if (is_idxd_wq_dmaengine(wq))  		idxd_unregister_dma_channel(wq);  	else if (is_idxd_wq_cdev(wq)) @@ -296,17 +251,13 @@ static void disable_wq(struct idxd_wq *wq)  	idxd_wq_unmap_portal(wq);  	idxd_wq_drain(wq); -	rc = idxd_wq_disable(wq); +	idxd_wq_reset(wq);  	idxd_wq_free_resources(wq);  	wq->client_count = 0;  	mutex_unlock(&wq->wq_lock); -	if (rc < 0) -		dev_warn(dev, "Failed to disable %s: %d\n", -			 dev_name(&wq->conf_dev), rc); -	else -		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev)); +	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));  }  static int idxd_config_bus_remove(struct device *dev) @@ -327,7 +278,7 @@ static int idxd_config_bus_remove(struct device *dev)  		dev_dbg(dev, "%s removing dev %s\n", __func__,  			dev_name(&idxd->conf_dev));  		for (i = 0; i < idxd->max_wqs; i++) { -			struct idxd_wq *wq = &idxd->wqs[i]; +			struct idxd_wq *wq = idxd->wqs[i];  			if (wq->state == IDXD_WQ_DISABLED)  				continue; @@ -338,12 +289,14 @@ static int idxd_config_bus_remove(struct device *dev)  		idxd_unregister_dma_device(idxd);  		rc = idxd_device_disable(idxd); -		for (i = 0; i < idxd->max_wqs; i++) { -			struct idxd_wq *wq = &idxd->wqs[i]; +		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { +			for (i = 0; i < idxd->max_wqs; i++) { +				struct idxd_wq *wq = idxd->wqs[i]; -			mutex_lock(&wq->wq_lock); -			idxd_wq_disable_cleanup(wq); -			mutex_unlock(&wq->wq_lock); +				mutex_lock(&wq->wq_lock); +				
idxd_wq_disable_cleanup(wq); +				mutex_unlock(&wq->wq_lock); +			}  		}  		module_put(THIS_MODULE);  		if (rc < 0) @@ -369,19 +322,6 @@ struct bus_type dsa_bus_type = {  	.shutdown = idxd_config_bus_shutdown,  }; -struct bus_type iax_bus_type = { -	.name = "iax", -	.match = idxd_config_bus_match, -	.probe = idxd_config_bus_probe, -	.remove = idxd_config_bus_remove, -	.shutdown = idxd_config_bus_shutdown, -}; - -static struct bus_type *idxd_bus_types[] = { -	&dsa_bus_type, -	&iax_bus_type -}; -  static struct idxd_device_driver dsa_drv = {  	.drv = {  		.name = "dsa", @@ -391,60 +331,15 @@ static struct idxd_device_driver dsa_drv = {  	},  }; -static struct idxd_device_driver iax_drv = { -	.drv = { -		.name = "iax", -		.bus = &iax_bus_type, -		.owner = THIS_MODULE, -		.mod_name = KBUILD_MODNAME, -	}, -}; - -static struct idxd_device_driver *idxd_drvs[] = { -	&dsa_drv, -	&iax_drv -}; - -struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) -{ -	return idxd_bus_types[idxd->type]; -} - -static struct device_type *idxd_get_device_type(struct idxd_device *idxd) -{ -	if (idxd->type == IDXD_TYPE_DSA) -		return &dsa_device_type; -	else if (idxd->type == IDXD_TYPE_IAX) -		return &iax_device_type; -	else -		return NULL; -} -  /* IDXD generic driver setup */  int idxd_register_driver(void)  { -	int i, rc; - -	for (i = 0; i < IDXD_TYPE_MAX; i++) { -		rc = driver_register(&idxd_drvs[i]->drv); -		if (rc < 0) -			goto drv_fail; -	} - -	return 0; - -drv_fail: -	while (--i >= 0) -		driver_unregister(&idxd_drvs[i]->drv); -	return rc; +	return driver_register(&dsa_drv.drv);  }  void idxd_unregister_driver(void)  { -	int i; - -	for (i = 0; i < IDXD_TYPE_MAX; i++) -		driver_unregister(&idxd_drvs[i]->drv); +	driver_unregister(&dsa_drv.drv);  }  /* IDXD engine attributes */ @@ -455,9 +350,9 @@ static ssize_t engine_group_id_show(struct device *dev,  		container_of(dev, struct idxd_engine, conf_dev);  	if (engine->group) -		return sprintf(buf, "%d\n", engine->group->id); +		return sysfs_emit(buf, "%d\n", engine->group->id);  	else -		return sprintf(buf, "%d\n", -1); +		return sysfs_emit(buf, "%d\n", -1);  }  static ssize_t engine_group_id_store(struct device *dev, @@ -493,7 +388,7 @@ static ssize_t engine_group_id_store(struct device *dev,  	if (prevg)  		prevg->num_engines--; -	engine->group = &idxd->groups[id]; +	engine->group = idxd->groups[id];  	engine->group->num_engines++;  	return count; @@ -517,6 +412,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {  	NULL,  }; +static void idxd_conf_engine_release(struct device *dev) +{ +	struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev); + +	kfree(engine); +} + +struct device_type idxd_engine_device_type = { +	.name = "engine", +	.release = idxd_conf_engine_release, +	.groups = idxd_engine_attribute_groups, +}; +  /* Group attributes */  static void idxd_set_free_tokens(struct idxd_device *idxd) @@ -524,7 +432,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)  	int i, tokens;  	for (i = 0, tokens = 0; i < idxd->max_groups; i++) { -		struct idxd_group *g = &idxd->groups[i]; +		struct idxd_group *g = idxd->groups[i];  		tokens += g->tokens_reserved;  	} @@ -539,7 +447,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev,  	struct idxd_group *group =  		container_of(dev, struct idxd_group, conf_dev); -	return sprintf(buf, "%u\n", group->tokens_reserved); +	return sysfs_emit(buf, "%u\n", group->tokens_reserved);  }  static ssize_t group_tokens_reserved_store(struct device *dev, @@ 
-556,7 +464,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,  	if (rc < 0)  		return -EINVAL; -	if (idxd->type == IDXD_TYPE_IAX) +	if (idxd->data->type == IDXD_TYPE_IAX)  		return -EOPNOTSUPP;  	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) @@ -587,7 +495,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev,  	struct idxd_group *group =  		container_of(dev, struct idxd_group, conf_dev); -	return sprintf(buf, "%u\n", group->tokens_allowed); +	return sysfs_emit(buf, "%u\n", group->tokens_allowed);  }  static ssize_t group_tokens_allowed_store(struct device *dev, @@ -604,7 +512,7 @@ static ssize_t group_tokens_allowed_store(struct device *dev,  	if (rc < 0)  		return -EINVAL; -	if (idxd->type == IDXD_TYPE_IAX) +	if (idxd->data->type == IDXD_TYPE_IAX)  		return -EOPNOTSUPP;  	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) @@ -632,7 +540,7 @@ static ssize_t group_use_token_limit_show(struct device *dev,  	struct idxd_group *group =  		container_of(dev, struct idxd_group, conf_dev); -	return sprintf(buf, "%u\n", group->use_token_limit); +	return sysfs_emit(buf, "%u\n", group->use_token_limit);  }  static ssize_t group_use_token_limit_store(struct device *dev, @@ -649,7 +557,7 @@ static ssize_t group_use_token_limit_store(struct device *dev,  	if (rc < 0)  		return -EINVAL; -	if (idxd->type == IDXD_TYPE_IAX) +	if (idxd->data->type == IDXD_TYPE_IAX)  		return -EOPNOTSUPP;  	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) @@ -675,22 +583,22 @@ static ssize_t group_engines_show(struct device *dev,  	struct idxd_group *group =  		container_of(dev, struct idxd_group, conf_dev);  	int i, rc = 0; -	char *tmp = buf;  	struct idxd_device *idxd = group->idxd;  	for (i = 0; i < idxd->max_engines; i++) { -		struct idxd_engine *engine = &idxd->engines[i]; +		struct idxd_engine *engine = idxd->engines[i];  		if (!engine->group)  			continue;  		if (engine->group->id == group->id) -			rc += sprintf(tmp + rc, "engine%d.%d ", -					idxd->id, engine->id); +			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);  	} +	if (!rc) +		return 0;  	rc--; -	rc += sprintf(tmp + rc, "\n"); +	rc += sysfs_emit_at(buf, rc, "\n");  	return rc;  } @@ -704,22 +612,22 @@ static ssize_t group_work_queues_show(struct device *dev,  	struct idxd_group *group =  		container_of(dev, struct idxd_group, conf_dev);  	int i, rc = 0; -	char *tmp = buf;  	struct idxd_device *idxd = group->idxd;  	for (i = 0; i < idxd->max_wqs; i++) { -		struct idxd_wq *wq = &idxd->wqs[i]; +		struct idxd_wq *wq = idxd->wqs[i];  		if (!wq->group)  			continue;  		if (wq->group->id == group->id) -			rc += sprintf(tmp + rc, "wq%d.%d ", -					idxd->id, wq->id); +			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);  	} +	if (!rc) +		return 0;  	rc--; -	rc += sprintf(tmp + rc, "\n"); +	rc += sysfs_emit_at(buf, rc, "\n");  	return rc;  } @@ -734,7 +642,7 @@ static ssize_t group_traffic_class_a_show(struct device *dev,  	struct idxd_group *group =  		container_of(dev, struct idxd_group, conf_dev); -	return sprintf(buf, "%d\n", group->tc_a); +	return sysfs_emit(buf, "%d\n", group->tc_a);  }  static ssize_t group_traffic_class_a_store(struct device *dev, @@ -775,7 +683,7 @@ static ssize_t group_traffic_class_b_show(struct device *dev,  	struct idxd_group *group =  		container_of(dev, struct idxd_group, conf_dev); -	return sprintf(buf, "%d\n", group->tc_b); +	return sysfs_emit(buf, "%d\n", group->tc_b);  }  static ssize_t group_traffic_class_b_store(struct device *dev, @@ -829,13 +737,26 @@ static 
const struct attribute_group *idxd_group_attribute_groups[] = {  	NULL,  }; +static void idxd_conf_group_release(struct device *dev) +{ +	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); + +	kfree(group); +} + +struct device_type idxd_group_device_type = { +	.name = "group", +	.release = idxd_conf_group_release, +	.groups = idxd_group_attribute_groups, +}; +  /* IDXD work queue attribs */  static ssize_t wq_clients_show(struct device *dev,  			       struct device_attribute *attr, char *buf)  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%d\n", wq->client_count); +	return sysfs_emit(buf, "%d\n", wq->client_count);  }  static struct device_attribute dev_attr_wq_clients = @@ -848,12 +769,12 @@ static ssize_t wq_state_show(struct device *dev,  	switch (wq->state) {  	case IDXD_WQ_DISABLED: -		return sprintf(buf, "disabled\n"); +		return sysfs_emit(buf, "disabled\n");  	case IDXD_WQ_ENABLED: -		return sprintf(buf, "enabled\n"); +		return sysfs_emit(buf, "enabled\n");  	} -	return sprintf(buf, "unknown\n"); +	return sysfs_emit(buf, "unknown\n");  }  static struct device_attribute dev_attr_wq_state = @@ -865,9 +786,9 @@ static ssize_t wq_group_id_show(struct device *dev,  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);  	if (wq->group) -		return sprintf(buf, "%u\n", wq->group->id); +		return sysfs_emit(buf, "%u\n", wq->group->id);  	else -		return sprintf(buf, "-1\n"); +		return sysfs_emit(buf, "-1\n");  }  static ssize_t wq_group_id_store(struct device *dev, @@ -901,7 +822,7 @@ static ssize_t wq_group_id_store(struct device *dev,  		return count;  	} -	group = &idxd->groups[id]; +	group = idxd->groups[id];  	prevg = wq->group;  	if (prevg) @@ -919,8 +840,7 @@ static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%s\n", -			wq_dedicated(wq) ? "dedicated" : "shared"); +	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? 
"dedicated" : "shared");  }  static ssize_t wq_mode_store(struct device *dev, @@ -956,7 +876,7 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%u\n", wq->size); +	return sysfs_emit(buf, "%u\n", wq->size);  }  static int total_claimed_wq_size(struct idxd_device *idxd) @@ -965,7 +885,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)  	int wq_size = 0;  	for (i = 0; i < idxd->max_wqs; i++) { -		struct idxd_wq *wq = &idxd->wqs[i]; +		struct idxd_wq *wq = idxd->wqs[i];  		wq_size += wq->size;  	} @@ -989,7 +909,7 @@ static ssize_t wq_size_store(struct device *dev,  	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))  		return -EPERM; -	if (wq->state != IDXD_WQ_DISABLED) +	if (idxd->state == IDXD_DEV_ENABLED)  		return -EPERM;  	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) @@ -1007,7 +927,7 @@ static ssize_t wq_priority_show(struct device *dev,  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%u\n", wq->priority); +	return sysfs_emit(buf, "%u\n", wq->priority);  }  static ssize_t wq_priority_store(struct device *dev, @@ -1044,8 +964,7 @@ static ssize_t wq_block_on_fault_show(struct device *dev,  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%u\n", -		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); +	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));  }  static ssize_t wq_block_on_fault_store(struct device *dev, @@ -1084,7 +1003,7 @@ static ssize_t wq_threshold_show(struct device *dev,  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%u\n", wq->threshold); +	return sysfs_emit(buf, "%u\n", wq->threshold);  }  static ssize_t wq_threshold_store(struct device *dev, @@ -1127,15 +1046,12 @@ static ssize_t wq_type_show(struct device *dev,  	switch (wq->type) {  	case IDXD_WQT_KERNEL: -		return sprintf(buf, "%s\n", -			       idxd_wq_type_names[IDXD_WQT_KERNEL]); +		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);  	case IDXD_WQT_USER: -		return sprintf(buf, "%s\n", -			       idxd_wq_type_names[IDXD_WQT_USER]); +		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);  	case IDXD_WQT_NONE:  	default: -		return sprintf(buf, "%s\n", -			       idxd_wq_type_names[IDXD_WQT_NONE]); +		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);  	}  	return -EINVAL; @@ -1176,7 +1092,7 @@ static ssize_t wq_name_show(struct device *dev,  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%s\n", wq->name); +	return sysfs_emit(buf, "%s\n", wq->name);  }  static ssize_t wq_name_store(struct device *dev, @@ -1211,8 +1127,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,  				  struct device_attribute *attr, char *buf)  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); +	int minor = -1; + +	mutex_lock(&wq->wq_lock); +	if (wq->idxd_cdev) +		minor = wq->idxd_cdev->minor; +	mutex_unlock(&wq->wq_lock); -	return sprintf(buf, "%d\n", wq->idxd_cdev.minor); +	if (minor == -1) +		return -ENXIO; +	return sysfs_emit(buf, "%d\n", minor);  }  static struct device_attribute dev_attr_wq_cdev_minor = @@ -1238,7 +1162,7 @@ static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attri  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return 
sprintf(buf, "%llu\n", wq->max_xfer_bytes); +	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);  }  static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr, @@ -1272,7 +1196,7 @@ static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribut  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%u\n", wq->max_batch_size); +	return sysfs_emit(buf, "%u\n", wq->max_batch_size);  }  static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr, @@ -1305,7 +1229,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *  {  	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); -	return sprintf(buf, "%u\n", wq->ats_dis); +	return sysfs_emit(buf, "%u\n", wq->ats_dis);  }  static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr, @@ -1361,6 +1285,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {  	NULL,  }; +static void idxd_conf_wq_release(struct device *dev) +{ +	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + +	kfree(wq->wqcfg); +	kfree(wq); +} + +struct device_type idxd_wq_device_type = { +	.name = "wq", +	.release = idxd_conf_wq_release, +	.groups = idxd_wq_attribute_groups, +}; +  /* IDXD device attribs */  static ssize_t version_show(struct device *dev, struct device_attribute *attr,  			    char *buf) @@ -1368,7 +1306,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%#x\n", idxd->hw.version); +	return sysfs_emit(buf, "%#x\n", idxd->hw.version);  }  static DEVICE_ATTR_RO(version); @@ -1379,7 +1317,7 @@ static ssize_t max_work_queues_size_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->max_wq_size); +	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);  }  static DEVICE_ATTR_RO(max_work_queues_size); @@ -1389,7 +1327,7 @@ static ssize_t max_groups_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->max_groups); +	return sysfs_emit(buf, "%u\n", idxd->max_groups);  }  static DEVICE_ATTR_RO(max_groups); @@ -1399,7 +1337,7 @@ static ssize_t max_work_queues_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->max_wqs); +	return sysfs_emit(buf, "%u\n", idxd->max_wqs);  }  static DEVICE_ATTR_RO(max_work_queues); @@ -1409,7 +1347,7 @@ static ssize_t max_engines_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->max_engines); +	return sysfs_emit(buf, "%u\n", idxd->max_engines);  }  static DEVICE_ATTR_RO(max_engines); @@ -1419,7 +1357,7 @@ static ssize_t numa_node_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); +	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));  }  static DEVICE_ATTR_RO(numa_node); @@ -1429,7 +1367,7 @@ static ssize_t max_batch_size_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->max_batch_size); +	return sysfs_emit(buf, "%u\n", 
idxd->max_batch_size);  }  static DEVICE_ATTR_RO(max_batch_size); @@ -1440,7 +1378,7 @@ static ssize_t max_transfer_size_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); +	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);  }  static DEVICE_ATTR_RO(max_transfer_size); @@ -1449,8 +1387,14 @@ static ssize_t op_cap_show(struct device *dev,  {  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); +	int i, rc = 0; + +	for (i = 0; i < 4; i++) +		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]); -	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); +	rc--; +	rc += sysfs_emit_at(buf, rc, "\n"); +	return rc;  }  static DEVICE_ATTR_RO(op_cap); @@ -1460,7 +1404,7 @@ static ssize_t gen_cap_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits); +	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);  }  static DEVICE_ATTR_RO(gen_cap); @@ -1470,8 +1414,7 @@ static ssize_t configurable_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", -			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); +	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));  }  static DEVICE_ATTR_RO(configurable); @@ -1485,13 +1428,13 @@ static ssize_t clients_show(struct device *dev,  	spin_lock_irqsave(&idxd->dev_lock, flags);  	for (i = 0; i < idxd->max_wqs; i++) { -		struct idxd_wq *wq = &idxd->wqs[i]; +		struct idxd_wq *wq = idxd->wqs[i];  		count += wq->client_count;  	}  	spin_unlock_irqrestore(&idxd->dev_lock, flags); -	return sprintf(buf, "%d\n", count); +	return sysfs_emit(buf, "%d\n", count);  }  static DEVICE_ATTR_RO(clients); @@ -1501,7 +1444,7 @@ static ssize_t pasid_enabled_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", device_pasid_enabled(idxd)); +	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));  }  static DEVICE_ATTR_RO(pasid_enabled); @@ -1514,14 +1457,14 @@ static ssize_t state_show(struct device *dev,  	switch (idxd->state) {  	case IDXD_DEV_DISABLED:  	case IDXD_DEV_CONF_READY: -		return sprintf(buf, "disabled\n"); +		return sysfs_emit(buf, "disabled\n");  	case IDXD_DEV_ENABLED: -		return sprintf(buf, "enabled\n"); +		return sysfs_emit(buf, "enabled\n");  	case IDXD_DEV_HALTED: -		return sprintf(buf, "halted\n"); +		return sysfs_emit(buf, "halted\n");  	} -	return sprintf(buf, "unknown\n"); +	return sysfs_emit(buf, "unknown\n");  }  static DEVICE_ATTR_RO(state); @@ -1535,10 +1478,10 @@ static ssize_t errors_show(struct device *dev,  	spin_lock_irqsave(&idxd->dev_lock, flags);  	for (i = 0; i < 4; i++) -		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); +		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);  	spin_unlock_irqrestore(&idxd->dev_lock, flags);  	out--; -	out += sprintf(buf + out, "\n"); +	out += sysfs_emit_at(buf, out, "\n");  	return out;  }  static DEVICE_ATTR_RO(errors); @@ -1549,7 +1492,7 @@ static ssize_t max_tokens_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->max_tokens); +	return sysfs_emit(buf, "%u\n", idxd->max_tokens);  }  static DEVICE_ATTR_RO(max_tokens); @@ -1559,7 +1502,7 @@ static 
ssize_t token_limit_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->token_limit); +	return sysfs_emit(buf, "%u\n", idxd->token_limit);  }  static ssize_t token_limit_store(struct device *dev, @@ -1598,7 +1541,7 @@ static ssize_t cdev_major_show(struct device *dev,  	struct idxd_device *idxd =  		container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%u\n", idxd->major); +	return sysfs_emit(buf, "%u\n", idxd->major);  }  static DEVICE_ATTR_RO(cdev_major); @@ -1607,7 +1550,7 @@ static ssize_t cmd_status_show(struct device *dev,  {  	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); -	return sprintf(buf, "%#x\n", idxd->cmd_status); +	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);  }  static DEVICE_ATTR_RO(cmd_status); @@ -1643,183 +1586,161 @@ static const struct attribute_group *idxd_attribute_groups[] = {  	NULL,  }; -static int idxd_setup_engine_sysfs(struct idxd_device *idxd) +static void idxd_conf_device_release(struct device *dev)  { -	struct device *dev = &idxd->pdev->dev; -	int i, rc; +	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); + +	kfree(idxd->groups); +	kfree(idxd->wqs); +	kfree(idxd->engines); +	kfree(idxd->irq_entries); +	kfree(idxd->int_handles); +	ida_free(&idxd_ida, idxd->id); +	kfree(idxd); +} + +struct device_type dsa_device_type = { +	.name = "dsa", +	.release = idxd_conf_device_release, +	.groups = idxd_attribute_groups, +}; + +struct device_type iax_device_type = { +	.name = "iax", +	.release = idxd_conf_device_release, +	.groups = idxd_attribute_groups, +}; + +static int idxd_register_engine_devices(struct idxd_device *idxd) +{ +	int i, j, rc;  	for (i = 0; i < idxd->max_engines; i++) { -		struct idxd_engine *engine = &idxd->engines[i]; - -		engine->conf_dev.parent = &idxd->conf_dev; -		dev_set_name(&engine->conf_dev, "engine%d.%d", -			     idxd->id, engine->id); -		engine->conf_dev.bus = idxd_get_bus_type(idxd); -		engine->conf_dev.groups = idxd_engine_attribute_groups; -		engine->conf_dev.type = &idxd_engine_device_type; -		dev_dbg(dev, "Engine device register: %s\n", -			dev_name(&engine->conf_dev)); -		rc = device_register(&engine->conf_dev); -		if (rc < 0) { -			put_device(&engine->conf_dev); +		struct idxd_engine *engine = idxd->engines[i]; + +		rc = device_add(&engine->conf_dev); +		if (rc < 0)  			goto cleanup; -		}  	}  	return 0;  cleanup: -	while (i--) { -		struct idxd_engine *engine = &idxd->engines[i]; +	j = i - 1; +	for (; i < idxd->max_engines; i++) +		put_device(&idxd->engines[i]->conf_dev); -		device_unregister(&engine->conf_dev); -	} +	while (j--) +		device_unregister(&idxd->engines[j]->conf_dev);  	return rc;  } -static int idxd_setup_group_sysfs(struct idxd_device *idxd) +static int idxd_register_group_devices(struct idxd_device *idxd)  { -	struct device *dev = &idxd->pdev->dev; -	int i, rc; +	int i, j, rc;  	for (i = 0; i < idxd->max_groups; i++) { -		struct idxd_group *group = &idxd->groups[i]; - -		group->conf_dev.parent = &idxd->conf_dev; -		dev_set_name(&group->conf_dev, "group%d.%d", -			     idxd->id, group->id); -		group->conf_dev.bus = idxd_get_bus_type(idxd); -		group->conf_dev.groups = idxd_group_attribute_groups; -		group->conf_dev.type = &idxd_group_device_type; -		dev_dbg(dev, "Group device register: %s\n", -			dev_name(&group->conf_dev)); -		rc = device_register(&group->conf_dev); -		if (rc < 0) { -			put_device(&group->conf_dev); +		struct idxd_group *group = 
idxd->groups[i]; + +		rc = device_add(&group->conf_dev); +		if (rc < 0)  			goto cleanup; -		}  	}  	return 0;  cleanup: -	while (i--) { -		struct idxd_group *group = &idxd->groups[i]; +	j = i - 1; +	for (; i < idxd->max_groups; i++) +		put_device(&idxd->groups[i]->conf_dev); -		device_unregister(&group->conf_dev); -	} +	while (j--) +		device_unregister(&idxd->groups[j]->conf_dev);  	return rc;  } -static int idxd_setup_wq_sysfs(struct idxd_device *idxd) +static int idxd_register_wq_devices(struct idxd_device *idxd)  { -	struct device *dev = &idxd->pdev->dev; -	int i, rc; +	int i, rc, j;  	for (i = 0; i < idxd->max_wqs; i++) { -		struct idxd_wq *wq = &idxd->wqs[i]; - -		wq->conf_dev.parent = &idxd->conf_dev; -		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); -		wq->conf_dev.bus = idxd_get_bus_type(idxd); -		wq->conf_dev.groups = idxd_wq_attribute_groups; -		wq->conf_dev.type = &idxd_wq_device_type; -		dev_dbg(dev, "WQ device register: %s\n", -			dev_name(&wq->conf_dev)); -		rc = device_register(&wq->conf_dev); -		if (rc < 0) { -			put_device(&wq->conf_dev); +		struct idxd_wq *wq = idxd->wqs[i]; + +		rc = device_add(&wq->conf_dev); +		if (rc < 0)  			goto cleanup; -		}  	}  	return 0;  cleanup: -	while (i--) { -		struct idxd_wq *wq = &idxd->wqs[i]; +	j = i - 1; +	for (; i < idxd->max_wqs; i++) +		put_device(&idxd->wqs[i]->conf_dev); -		device_unregister(&wq->conf_dev); -	} +	while (j--) +		device_unregister(&idxd->wqs[j]->conf_dev);  	return rc;  } -static int idxd_setup_device_sysfs(struct idxd_device *idxd) -{ -	struct device *dev = &idxd->pdev->dev; -	int rc; -	char devname[IDXD_NAME_SIZE]; - -	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); -	idxd->conf_dev.parent = dev; -	dev_set_name(&idxd->conf_dev, "%s", devname); -	idxd->conf_dev.bus = idxd_get_bus_type(idxd); -	idxd->conf_dev.groups = idxd_attribute_groups; -	idxd->conf_dev.type = idxd_get_device_type(idxd); - -	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); -	rc = device_register(&idxd->conf_dev); -	if (rc < 0) { -		put_device(&idxd->conf_dev); -		return rc; -	} - -	return 0; -} - -int idxd_setup_sysfs(struct idxd_device *idxd) +int idxd_register_devices(struct idxd_device *idxd)  {  	struct device *dev = &idxd->pdev->dev; -	int rc; +	int rc, i; -	rc = idxd_setup_device_sysfs(idxd); -	if (rc < 0) { -		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc); +	rc = device_add(&idxd->conf_dev); +	if (rc < 0)  		return rc; -	} -	rc = idxd_setup_wq_sysfs(idxd); +	rc = idxd_register_wq_devices(idxd);  	if (rc < 0) { -		/* unregister conf dev */ -		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc); -		return rc; +		dev_dbg(dev, "WQ devices registering failed: %d\n", rc); +		goto err_wq;  	} -	rc = idxd_setup_group_sysfs(idxd); +	rc = idxd_register_engine_devices(idxd);  	if (rc < 0) { -		/* unregister conf dev */ -		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc); -		return rc; +		dev_dbg(dev, "Engine devices registering failed: %d\n", rc); +		goto err_engine;  	} -	rc = idxd_setup_engine_sysfs(idxd); +	rc = idxd_register_group_devices(idxd);  	if (rc < 0) { -		/* unregister conf dev */ -		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc); -		return rc; +		dev_dbg(dev, "Group device registering failed: %d\n", rc); +		goto err_group;  	}  	return 0; + + err_group: +	for (i = 0; i < idxd->max_engines; i++) +		device_unregister(&idxd->engines[i]->conf_dev); + err_engine: +	for (i = 0; i < idxd->max_wqs; i++) +		device_unregister(&idxd->wqs[i]->conf_dev); + 
err_wq: +	device_del(&idxd->conf_dev); +	return rc;  } -void idxd_cleanup_sysfs(struct idxd_device *idxd) +void idxd_unregister_devices(struct idxd_device *idxd)  {  	int i;  	for (i = 0; i < idxd->max_wqs; i++) { -		struct idxd_wq *wq = &idxd->wqs[i]; +		struct idxd_wq *wq = idxd->wqs[i];  		device_unregister(&wq->conf_dev);  	}  	for (i = 0; i < idxd->max_engines; i++) { -		struct idxd_engine *engine = &idxd->engines[i]; +		struct idxd_engine *engine = idxd->engines[i];  		device_unregister(&engine->conf_dev);  	}  	for (i = 0; i < idxd->max_groups; i++) { -		struct idxd_group *group = &idxd->groups[i]; +		struct idxd_group *group = idxd->groups[i];  		device_unregister(&group->conf_dev);  	} @@ -1829,26 +1750,10 @@ void idxd_cleanup_sysfs(struct idxd_device *idxd)  int idxd_register_bus_type(void)  { -	int i, rc; - -	for (i = 0; i < IDXD_TYPE_MAX; i++) { -		rc = bus_register(idxd_bus_types[i]); -		if (rc < 0) -			goto bus_err; -	} - -	return 0; - -bus_err: -	while (--i >= 0) -		bus_unregister(idxd_bus_types[i]); -	return rc; +	return bus_register(&dsa_bus_type);  }  void idxd_unregister_bus_type(void)  { -	int i; - -	for (i = 0; i < IDXD_TYPE_MAX; i++) -		bus_unregister(idxd_bus_types[i]); +	bus_unregister(&dsa_bus_type);  } | 
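
A side change running through the whole diff is the conversion of the show() callbacks from sprintf() to sysfs_emit()/sysfs_emit_at(), which expect the page-aligned sysfs buffer and clamp output to PAGE_SIZE. A minimal sketch of the two forms, with hypothetical bar_count_show()/bar_list_show() callbacks standing in for the driver's attributes, might look like:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Single value: sysfs_emit() replaces sprintf(buf, ...) one for one. */
static ssize_t bar_count_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* 42 is a placeholder value */
}

/*
 * Accumulated output: sysfs_emit_at() appends at a running offset, the same
 * shape used by group_engines_show(), group_work_queues_show() and
 * op_cap_show() after this change.
 */
static ssize_t bar_list_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i, at = 0;

	for (i = 0; i < 4; i++)
		at += sysfs_emit_at(buf, at, "%d ", i);

	at--;					/* drop the trailing space */
	at += sysfs_emit_at(buf, at, "\n");
	return at;
}
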
