summaryrefslogtreecommitdiff
path: root/bsd-core
ModeNameSize
-rw-r--r--Makefile1216logplain
-rw-r--r--ati_pcigart.c3075logplain
d---------drm36logplain
-rw-r--r--drmP.h32623logplain
-rw-r--r--drm_agpsupport.c9989logplain
-rw-r--r--drm_atomic.h3860logplain
-rw-r--r--drm_auth.c4242logplain
-rw-r--r--drm_bufs.c28746logplain
-rw-r--r--drm_context.c8407logplain
-rw-r--r--drm_dma.c3483logplain
-rw-r--r--drm_drawable.c1779logplain
-rw-r--r--drm_drv.c25081logplain
-rw-r--r--drm_fops.c3566logplain
-rw-r--r--drm_ioctl.c6949logplain
-rw-r--r--drm_irq.c7059logplain
-rw-r--r--drm_linux_list.h2343logplain
-rw-r--r--drm_lock.c4945logplain
-rw-r--r--drm_memory.c4079logplain
-rw-r--r--drm_pci.c3907logplain
-rw-r--r--drm_scatter.c3356logplain
-rw-r--r--drm_sysctl.c7512logplain
-rw-r--r--drm_vm.c3594logplain
d---------i91536logplain
-rw-r--r--i915_drv.c3598logplain
d---------mach6436logplain
-rw-r--r--mach64_drv.c3676logplain
d---------mga36logplain
-rw-r--r--mga_drv.c5194logplain
d---------r12836logplain
-rw-r--r--r128_drv.c3794logplain
d---------radeon36logplain
-rw-r--r--radeon_drv.c4006logplain
d---------savage36logplain
-rw-r--r--savage_drv.c3250logplain
d---------sis36logplain
-rw-r--r--sis_drv.c3102logplain
d---------tdfx36logplain
-rw-r--r--tdfx_drv.c3144logplain
d---------via36logplain
-rw-r--r--via_drv.c3447logplain
">*native_type = DRM_FENCE_TYPE_EXE; /* NOTE(review): fragment — the start of this
					* function (and the switch this closes) lies
					* outside this chunk; leading '">' is HTML
					* scrape residue, left untouched. */
	break;
default:
	ret = -EINVAL;		/* unrecognized fence class/type */
	break;
}
return ret;
}

/**
 * Manual poll (from the fence manager).
 *
 * Called by the DRM fence manager to advance fence state for @class by
 * polling.  Runs the flush step under the fence-manager lock and, if work
 * is still outstanding afterwards, arms a one-tick timer so polling
 * continues without further calls from the manager.
 */
void via_poke_flush(struct drm_device * dev, uint32_t class)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long flags;
	uint32_t pending_flush;

	/* Device may not be initialized yet (or already torn down). */
	if (!dev_priv)
		return ;

	/* Fence bookkeeping is protected by the manager's rwlock; the
	 * irqsave variant is used, so this is presumably also touched
	 * from interrupt context — TODO confirm against the IRQ handler. */
	write_lock_irqsave(&fm->lock, flags);
	pending_flush = via_perform_flush(dev, class);
	/* Second call when the first reported pending work: its return
	 * value tells us whether flushes REMAIN pending after this poll,
	 * which decides the timer kick below. */
	if (pending_flush)
		pending_flush = via_perform_flush(dev, class);
	write_unlock_irqrestore(&fm->lock, flags);
	/*
	 * Kick the timer if there are more flushes pending.
	 * (expires = jiffies + 1: re-poll on the next timer tick;
	 * timer_pending() guard avoids re-arming an already-armed timer.)
	 */
	if (pending_flush && !timer_pending(&dev_priv->fence_timer)) {
		dev_priv->fence_timer.expires = jiffies + 1;
		add_timer(&dev_priv->fence_timer);
	}
}

/**
 * No irq fence expirations implemented yet.
 * Although both the HQV engines and PCI dmablit engines signal
 * idle with an IRQ, we haven't implemented this yet.
 * This means that the drm fence manager will always poll for engine idle,
 * unless the caller wanting to wait for a fence object has indicated a lazy wait.
 *
 * Returns: 0 (no IRQ-driven fence signaling available for any class).
 */
int via_fence_has_irq(struct drm_device * dev, uint32_t class, uint32_t flags)
{
	return 0;
}

/**
 * Regularly call the flush function. This enables lazy waits, so we can
 * set lazy_capable. Lazy waits don't really care when the fence expires,
 * so a timer tick delay should be fine.
 *
 * Timer callback armed by via_poke_flush(); @data carries the
 * struct drm_device pointer.
 * NOTE(review): body continues past the end of this chunk — incomplete here.
 */
void via_fence_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *) data;