Lines Matching defs:host
186 static void host1x_setup_sid_table(struct host1x *host)
188 const struct host1x_info *info = host->info;
194 host1x_hypervisor_writel(host, entry->offset, entry->base);
195 host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
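The two writes at lines 194-195 program one stream ID (SID) table entry; note that host1x_hypervisor_writel() takes the value before the register offset, so entry->offset and entry->limit are the data and entry->base (+ 4) the destination registers. A hedged reconstruction of the surrounding loop, assuming the num_sid_entries/sid_table fields of struct host1x_info and the base/offset/limit layout of struct host1x_sid_entry from the driver's private header:

    /* Sketch of the full host1x_setup_sid_table() loop (assumed field names). */
    static void host1x_setup_sid_table_sketch(struct host1x *host)
    {
            const struct host1x_info *info = host->info;
            unsigned int i;

            for (i = 0; i < info->num_sid_entries; i++) {
                    const struct host1x_sid_entry *entry = &info->sid_table[i];

                    /* value first, then register offset */
                    host1x_hypervisor_writel(host, entry->offset, entry->base);
                    host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
            }
    }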
238 static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
240 struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
244 if (host->dev->archdata.mapping) {
246 to_dma_iommu_mapping(host->dev);
247 arm_iommu_detach_device(host->dev);
250 domain = iommu_get_domain_for_dev(host->dev);
262 if (!host1x_wants_iommu(host) || domain)
265 host->group = iommu_group_get(host->dev);
266 if (host->group) {
275 host->domain = iommu_domain_alloc(&platform_bus_type);
276 if (!host->domain) {
281 err = iommu_attach_group(host->domain, host->group);
289 geometry = &host->domain->geometry;
290 start = geometry->aperture_start & host->info->dma_mask;
291 end = geometry->aperture_end & host->info->dma_mask;
293 order = __ffs(host->domain->pgsize_bitmap);
294 init_iova_domain(&host->iova, 1UL << order, start >> order);
295 host->iova_end = end;
297 domain = host->domain;
303 iommu_domain_free(host->domain);
304 host->domain = NULL;
308 iommu_group_put(host->group);
309 host->group = NULL;
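Lines 289-295 size the driver's private IOVA space: the aperture is clamped to the device's DMA mask, and the allocation granule is the smallest page size the IOMMU domain supports, obtained by taking __ffs() of pgsize_bitmap. A small, hedged userspace illustration of that arithmetic (the bitmap, mask and aperture values are made up, and __builtin_ctzll() stands in for the kernel's __ffs()):

    #include <stdio.h>

    int main(void)
    {
            /* Example values only: 4 KiB / 2 MiB / 1 GiB page sizes, 34-bit DMA mask. */
            unsigned long long pgsize_bitmap = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);
            unsigned long long dma_mask = (1ULL << 34) - 1;
            unsigned long long aperture_start = 0x100000000ULL;

            /* __builtin_ctzll() mirrors __ffs(): index of the lowest set bit. */
            unsigned int order = __builtin_ctzll(pgsize_bitmap);
            unsigned long long granule = 1ULL << order;            /* smallest IOMMU page */
            unsigned long long start = aperture_start & dma_mask;  /* clamp to DMA mask   */

            /* init_iova_domain() takes the granule and the starting page-frame number. */
            printf("granule = %#llx, start pfn = %#llx\n", granule, start >> order);
            return 0;
    }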
314 static int host1x_iommu_init(struct host1x *host)
316 u64 mask = host->info->dma_mask;
320 domain = host1x_iommu_attach(host);
323 dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
335 if (!domain && !host->info->has_wide_gather)
338 err = dma_coerce_mask_and_coherent(host->dev, mask);
340 dev_err(host->dev, "failed to set DMA mask: %d\n", err);
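Lines 335-340 only show the guard and the failure message; the intent is that when host1x ends up without an IOMMU domain and the hardware lacks the wide GATHER opcode, the DMA mask is narrowed before being applied, so push buffers stay addressable by GATHER. A hedged sketch of the whole helper, assuming a 32-bit fallback mask and the driver's internal headers:

    /* Sketch of host1x_iommu_init(); the DMA_BIT_MASK(32) fallback is assumed. */
    static int host1x_iommu_init_sketch(struct host1x *host)
    {
            u64 mask = host->info->dma_mask;
            struct iommu_domain *domain;
            int err;

            domain = host1x_iommu_attach(host);
            if (IS_ERR(domain)) {
                    err = PTR_ERR(domain);
                    dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
                    return err;
            }

            /* No IOMMU and no wide GATHER: restrict buffers to 32-bit addresses. */
            if (!domain && !host->info->has_wide_gather)
                    mask = DMA_BIT_MASK(32);

            err = dma_coerce_mask_and_coherent(host->dev, mask);
            if (err < 0) {
                    dev_err(host->dev, "failed to set DMA mask: %d\n", err);
                    return err;
            }

            return 0;
    }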
347 static void host1x_iommu_exit(struct host1x *host)
349 if (host->domain) {
350 put_iova_domain(&host->iova);
351 iommu_detach_group(host->domain, host->group);
353 iommu_domain_free(host->domain);
354 host->domain = NULL;
358 iommu_group_put(host->group);
359 host->group = NULL;
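host1x_iommu_exit() only has work to do when host1x_iommu_attach() created a private domain, and it releases the resources in roughly the reverse order they were acquired. A hedged reconstruction; the iova_cache_put() between freeing the domain and dropping the group is an assumption, pairing the iova_cache_get() the attach path would need:

    /* Sketch of host1x_iommu_exit(): undo host1x_iommu_attach() in reverse. */
    static void host1x_iommu_exit_sketch(struct host1x *host)
    {
            if (host->domain) {
                    put_iova_domain(&host->iova);
                    iommu_detach_group(host->domain, host->group);

                    iommu_domain_free(host->domain);
                    host->domain = NULL;

                    iova_cache_put();    /* assumed counterpart of iova_cache_get() */

                    iommu_group_put(host->group);
                    host->group = NULL;
            }
    }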
365 struct host1x *host;
370 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
371 if (!host)
374 host->info = of_device_get_match_data(&pdev->dev);
376 if (host->info->has_hypervisor) {
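The body of this has_hypervisor branch (lines 377-401) is not part of the listing; it is presumably where probe looks up the MMIO apertures and the syncpt interrupt before the bookkeeping at lines 402 onward. A hedged sketch of that step; the resource indices are assumptions: with a hypervisor aperture the device is assumed to expose two IORESOURCE_MEM regions, otherwise one, and the syncpt interrupt is taken to be the first platform IRQ:

    /* Sketch only: resource/IRQ lookup preceding the ioremap calls below. */
    struct resource *regs, *hv_regs = NULL;
    int syncpt_irq;

    if (host->info->has_hypervisor) {
            hv_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);  /* assumed index */
            regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);     /* assumed index */
    } else {
            regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    }
    if (!regs || (host->info->has_hypervisor && !hv_regs))
            return -ENXIO;

    syncpt_irq = platform_get_irq(pdev, 0);
    if (syncpt_irq < 0)
            return syncpt_irq;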
402 mutex_init(&host->devices_lock);
403 INIT_LIST_HEAD(&host->devices);
404 INIT_LIST_HEAD(&host->list);
405 host->dev = &pdev->dev;
408 platform_set_drvdata(pdev, host);
410 host->regs = devm_ioremap_resource(&pdev->dev, regs);
411 if (IS_ERR(host->regs))
412 return PTR_ERR(host->regs);
414 if (host->info->has_hypervisor) {
415 host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
416 if (IS_ERR(host->hv_regs))
417 return PTR_ERR(host->hv_regs);
420 host->dev->dma_parms = &host->dma_parms;
421 dma_set_max_seg_size(host->dev, UINT_MAX);
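Lines 420-421 use a common DMA API pattern: dev->dma_parms must point at storage owned by the driver before dma_set_max_seg_size() can record a limit, and UINT_MAX effectively removes the segment-size cap. A minimal, hedged sketch of the same pattern with a hypothetical driver structure:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/kernel.h>

    /* Hypothetical driver context, analogous to struct host1x here. */
    struct my_ctx {
            struct device *dev;
            struct device_dma_parameters dma_parms;  /* backing storage for dev->dma_parms */
    };

    static void my_ctx_init_dma(struct my_ctx *ctx)
    {
            ctx->dev->dma_parms = &ctx->dma_parms;
            dma_set_max_seg_size(ctx->dev, UINT_MAX); /* lift the max segment size */
    }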
423 if (host->info->init) {
424 err = host->info->init(host);
429 host->clk = devm_clk_get(&pdev->dev, NULL);
430 if (IS_ERR(host->clk)) {
431 err = PTR_ERR(host->clk);
439 host->rst = devm_reset_control_get(&pdev->dev, "host1x");
440 if (IS_ERR(host->rst)) {
441 err = PTR_ERR(host->rst);
446 err = host1x_iommu_init(host);
452 err = host1x_channel_list_init(&host->channel_list,
453 host->info->nb_channels);
459 err = clk_prepare_enable(host->clk);
465 err = reset_control_deassert(host->rst);
471 err = host1x_syncpt_init(host);
477 err = host1x_intr_init(host, syncpt_irq);
483 host1x_debug_init(host);
485 if (host->info->has_hypervisor)
486 host1x_setup_sid_table(host);
488 err = host1x_register(host);
499 host1x_unregister(host);
501 host1x_debug_deinit(host);
502 host1x_intr_deinit(host);
504 host1x_syncpt_deinit(host);
506 reset_control_assert(host->rst);
508 clk_disable_unprepare(host->clk);
510 host1x_channel_list_free(&host->channel_list);
512 host1x_iommu_exit(host);
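Lines 499-512 are the tail of probe's error handling: each failed step jumps to a label that undoes everything acquired so far, in the reverse of the setup order at lines 446-488. A hedged skeleton of that goto ladder; the label names are illustrative, only the call ordering comes from the listing:

    /* Skeleton of the probe unwind path (illustrative label names). */
    unregister:
            host1x_unregister(host);
    deinit_debugfs:
            host1x_debug_deinit(host);
    deinit_intr:
            host1x_intr_deinit(host);
    deinit_syncpt:
            host1x_syncpt_deinit(host);
    assert_reset:
            reset_control_assert(host->rst);
    disable_clk:
            clk_disable_unprepare(host->clk);
    free_channels:
            host1x_channel_list_free(&host->channel_list);
    exit_iommu:
            host1x_iommu_exit(host);

            return err;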
519 struct host1x *host = platform_get_drvdata(pdev);
521 host1x_unregister(host);
522 host1x_debug_deinit(host);
523 host1x_intr_deinit(host);
524 host1x_syncpt_deinit(host);
525 reset_control_assert(host->rst);
526 clk_disable_unprepare(host->clk);
527 host1x_channel_list_free(&host->channel_list);
528 host1x_iommu_exit(host);
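host1x_remove() (lines 519-528) runs the same teardown as the probe error path, starting from a fully initialized device. For context, a hedged sketch of how the two callbacks would typically be wired into the platform driver; the driver and match-table names follow the usual tegra-host1x conventions but are assumptions as far as this listing goes, and the actual registration call is not shown:

    /* Sketch only: tying the probe/remove callbacks above to a platform driver. */
    static struct platform_driver tegra_host1x_driver = {
            .driver = {
                    .name = "tegra-host1x",
                    .of_match_table = host1x_of_match,
            },
            .probe = host1x_probe,
            .remove = host1x_remove,
    };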