path: root/shared-core/savage_bci.c
blob: 4b8a89fea640821f14a65106b4923e2282e9f7e0
/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004  Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"

/* Need a long timeout because shadow status updates can take a while,
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000 /* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000 /* 5s */
#define SAVAGE_FREELIST_DEBUG		0

static int savage_do_cleanup_bci(struct drm_device *dev);

static int
savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
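/*
 * The comparison in the wait loops below treats event tags as 16-bit
 * modular values: (((status & 0xffff) - e) & 0xffff) <= 0x7fff holds
 * once the hardware tag has caught up with e, even across a tag wrap.
 * E.g. status=0x0002 and e=0xfffe gives a distance of 4, so the wait
 * completes although the raw tag is numerically smaller.
 */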
static int
savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[1];
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

static int
savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

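/*
 * Emit a new event tag to the BCI stream, optionally preceded by a 2D
 * and/or 3D engine wait. The tag is never 0 (see the comment above) and
 * is mirrored to status_ptr[1023] to coordinate with the X server.
 * Returns the new 16-bit tag.
 */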
uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
			       unsigned int flags)
{
	uint16_t count;
	BCI_LOCALS;

	if (dev_priv->status_ptr) {
		/* coordinate with Xserver */
		count = dev_priv->status_ptr[1023];
		if (count < dev_priv->event_counter)
			dev_priv->event_wrap++;
	} else {
		count = dev_priv->event_counter;
	}
	count = (count + 1) & 0xffff;
	if (count == 0) {
		count++; /* See the comment above savage_bci_wait_event_*. */
		dev_priv->event_wrap++;
	}
	dev_priv->event_counter = count;
	if (dev_priv->status_ptr)
		dev_priv->status_ptr[1023] = (uint32_t)count;

	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
		unsigned int wait_cmd = BCI_CMD_WAIT;
		if ((flags & SAVAGE_WAIT_2D))
			wait_cmd |= BCI_CMD_WAIT_2D;
		if ((flags & SAVAGE_WAIT_3D))
			wait_cmd |= BCI_CMD_WAIT_3D;
		BEGIN_BCI(2);
		BCI_WRITE(wait_cmd);
	} else {
		BEGIN_BCI(1);
	}
	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);

	return count;
}

/*
 * Freelist management
 */
static int savage_freelist_init(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_savage_buf_priv_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head.next = &dev_priv->tail;
	dev_priv->head.prev = NULL;
	dev_priv->head.buf = NULL;

	dev_priv->tail.next = NULL;
	dev_priv->tail.prev = &dev_priv->head;
	dev_priv->tail.buf = NULL;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		entry = buf->dev_private;

		SET_AGE(&entry->age, 0, 0);
		entry->buf = buf;

		entry->next = dev_priv->head.next;
		entry->prev = &dev_priv->head;
		dev_priv->head.next->prev = entry;
		dev_priv->head.next = entry;
	}

	return 0;
}

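/*
 * Take the least recently used buffer off the tail of the freelist if
 * the hardware has already passed its age event; return NULL otherwise.
 */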
static struct drm_buf *savage_freelist_get(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
	uint16_t event;
	unsigned int wrap;
	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--; /* hardware hasn't passed the last wrap yet */

	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);

	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
		drm_savage_buf_priv_t *next = tail->next;
		drm_savage_buf_priv_t *prev = tail->prev;
		prev->next = next;
		next->prev = prev;
		tail->next = tail->prev = NULL;
		return tail->buf;
	}

	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
	return NULL;
}

void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

	if (entry->next != NULL || entry->prev != NULL) {
		DRM_ERROR("entry already on freelist.\n");
		return;
	}

	prev = &dev_priv->head;
	next = prev->next;
	prev->next = entry;
	next->prev = entry;
	entry->prev = prev;
	entry->next = next;
}

/*
 * Command DMA
 */
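/*
 * Split the command DMA map into SAVAGE_DMA_PAGE_SIZE-dword pages and
 * allocate the per-page bookkeeping (age, used, flushed).
 */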
static int savage_dma_init(drm_savage_private_t *dev_priv)
{
	unsigned int i;

	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
		(SAVAGE_DMA_PAGE_SIZE*4);
	dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
					dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
	if (dev_priv->dma_pages == NULL)
		return -ENOMEM;

	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, 0, 0);

	dev_priv->first_dma_page = 0;
	dev_priv->current_dma_page = 0;

	return 0;
}

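/*
 * Mark all command DMA pages as unused, aged to a freshly emitted event.
 */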
void savage_dma_reset(drm_savage_private_t *dev_priv)
{
	uint16_t event;
	unsigned int wrap, i;
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

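/*
 * Block until the hardware has consumed the given command DMA page,
 * i.e. until its age event has passed. Faked DMA pages never age.
 */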
void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
{
	uint16_t event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--; /* hardware hasn't passed the last wrap yet */

	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event)
		    < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}

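/*
 * Reserve n dwords of command DMA space and return a pointer into the
 * mapped cmd_dma area. If the request does not fit into the remaining
 * pages, pending commands are flushed and allocation restarts at page 0.
 */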
uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
		dev_priv->dma_pages[cur].used;
	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
		SAVAGE_DMA_PAGE_SIZE;
	uint32_t *dma_ptr;
	unsigned int i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		dev_priv->dma_flush(dev_priv);
		nr_pages =
		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}
	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > SAVAGE_DMA_PAGE_SIZE)
			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
		else
			dev_priv->dma_pages[i].used = n;
		n -= SAVAGE_DMA_PAGE_SIZE;
	}
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}

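/*
 * Hand the pending command DMA pages to the BCI: pad the current page
 * to an even length, point SAVAGE_DMABUFADDR at the first unflushed
 * dword, issue a BCI_DMA transfer and age the submitted pages.
 */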
static void savage_dma_flush(drm_savage_private_t *dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	DRM_MEMORYBARRIER();

	/* do flush ... */
	phys_addr = dev_priv->cmd_dma->offset +
		(first * SAVAGE_DMA_PAGE_SIZE +
		 dev_priv->dma_pages[first].flushed) * 4;
	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

	DRM_DEBUG("phys_addr=%lx, len=%u\n",
		  phys_addr | dev_priv->dma_type, len);

	BEGIN_BCI(3);
	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
	BCI_WRITE(phys_addr | dev_priv->dma_type);
	BCI_DMA(len);

	/* fix alignment of the start of the next block */
	dev_priv->dma_pages[cur].used += align;

	/* age DMA pages */
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = first; i < cur; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	/* age the current page only when it's full */
	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
		dev_priv->dma_pages[cur].used = 0;
		dev_priv->dma_pages[cur].flushed = 0;
		/* advance to next page */
		cur++;
		if (cur == dev_priv->nr_dma_pages)
			cur = 0;
		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
	} else {
		dev_priv->first_dma_page = cur;
		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);

	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
		  dev_priv->dma_pages[cur].used,
		  dev_priv->dma_pages[cur].flushed);
}

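/*
 * Flush for the faked DMA buffer: copy the buffered commands into the
 * BCI FIFO one dword at a time instead of starting a real DMA transfer.
 */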
static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
{
	unsigned int i, j;
	BCI_LOCALS;

	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
		return;

	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
		  dev_priv->first_dma_page, dev_priv->current_dma_page,
		  dev_priv->dma_pages[dev_priv->current_dma_page].used);

	for (i = dev_priv->first_dma_page;
	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
	     ++i) {
		uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
			i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
		/* Sanity check: all pages except the last one must be full. */
		if (i < dev_priv->current_dma_page &&
		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
			DRM_ERROR("partial DMA page %u: used=%u",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		BEGIN_BCI(dev_priv->dma_pages[i].used);
		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
			BCI_WRITE(dma_ptr[j]);
		}
		dev_priv->dma_pages[i].used = 0;
	}

	/* reset to first page */
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_savage_private_t *dev_priv;

	dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_savage_private_t));
	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = (enum savage_family)chipset;

	return 0;
}

/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture are not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	dev_priv->mtrr[0].handle = -1;
	dev_priv->mtrr[1].handle = -1;
	dev_priv->mtrr[2].handle = -1;
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_rsrc = 0;
		fb_base = drm_get_resource_start(dev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x01000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
			dev_priv->mtrr[1].base = fb_base + 0x02000000;
			dev_priv->mtrr[1].size = 0x02000000;
			dev_priv->mtrr[1].handle =
			    drm_mtrr_add(dev_priv->mtrr[1].base,
					 dev_priv->mtrr[1].size, DRM_MTRR_WC);
			dev_priv->mtrr[2].base = fb_base + 0x04000000;
			dev_priv->mtrr[2].size = 0x04000000;
			dev_priv->mtrr[2].handle =
			    drm_mtrr_add(dev_priv->mtrr[2].base,
				         dev_priv->mtrr[2].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x08000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 1));
		}
	} else {
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = drm_get_resource_len(dev, 1);
		aper_rsrc = 2;
		aperture_base = drm_get_resource_start(dev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);
	if (ret)
		return ret;

	return ret;
}

/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; ++i)
		if (dev_priv->mtrr[i].handle >= 0)
			drm_mtrr_del(dev_priv->mtrr[i].handle,
				     dev_priv->mtrr[i].base,
				     dev_priv->mtrr[i].size, DRM_MTRR_WC);
}

int savage_driver_unload(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);

	return 0;
}

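/*
 * Validate the init parameters, look up the shared maps and set up
 * buffer descriptors, status/BCI pointers and the command DMA machinery.
 */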
static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
		return -EINVAL;
	}
	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
		DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
		return -EINVAL;
	}
	if (init->dma_type != SAVAGE_DMA_AGP &&
	    init->dma_type != SAVAGE_DMA_PCI) {
		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
		return -EINVAL;
	}

	dev_priv->cob_size = init->cob_size;
	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
	dev_priv->dma_type = init->dma_type;

	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset;
	dev_priv->texture_size = init->texture_size;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		savage_do_cleanup_bci(dev);
		return -EINVAL;
	}
	if (init->status_offset != 0) {
		dev_priv->status = drm_core_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("could not find shadow status region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->status = NULL;
	}
	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map = drm_core_findmap(dev,
						       init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("could not find DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("failed to ioremap DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
	}
	if (init->agp_textures_offset) {
		dev_priv->agp_textures =
			drm_core_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->agp_textures = NULL;
	}

	if (init->cmd_dma_offset) {
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("command DMA not supported on "
				  "Savage3D/MX/IX.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev->dma && dev->dma->buflist) {
			DRM_ERROR("command and vertex DMA not supported "
				  "at the same time.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
		if (!dev_priv->cmd_dma) {
			DRM_ERROR("could not find command DMA region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
			if (dev_priv->cmd_dma->type != _DRM_AGP) {
				DRM_ERROR("AGP command DMA region is not a "
					  "_DRM_AGP map!\n");
				savage_do_cleanup_bci(dev);
				return -EINVAL;
			}
			drm_core_ioremap(dev_priv->cmd_dma, dev);
			if (!dev_priv->cmd_dma->handle) {
				DRM_ERROR("failed to ioremap command "
					  "DMA region!\n");
				savage_do_cleanup_bci(dev);
				return -ENOMEM;
			}
		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
			DRM_ERROR("PCI command DMA region is not a "
				  "_DRM_CONSISTENT map!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->cmd_dma = NULL;
	}

	dev_priv->dma_flush = savage_dma_flush;
	if (!dev_priv->cmd_dma) {
		DRM_DEBUG("falling back to faked command DMA.\n");
		dev_priv->fake_dma.offset = 0;
		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
		dev_priv->fake_dma.type = _DRM_SHM;
		dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
						      DRM_MEM_DRIVER);
		if (!dev_priv->fake_dma.handle) {
			DRM_ERROR("could not allocate faked DMA buffer!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
		dev_priv->cmd_dma = &dev_priv->fake_dma;
		dev_priv->dma_flush = savage_fake_dma_flush;
	}

	dev_priv->sarea_priv =
		(drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
				       init->sarea_priv_offset);

	/* setup bitmap descriptors */
	{
		unsigned int color_tile_format;
		unsigned int depth_tile_format;
		unsigned int front_stride, back_stride, depth_stride;
		if (dev_priv->chipset <= S3_SAVAGE4) {
			color_tile_format = dev_priv->fb_bpp == 16 ?
				SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
			depth_tile_format = dev_priv->depth_bpp == 16 ?
				SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
		} else {
			color_tile_format = SAVAGE_BD_TILE_DEST;
			depth_tile_format = SAVAGE_BD_TILE_DEST;
		}
		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
		depth_stride =
		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
			(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
			(color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
			(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
			(color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
			(dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
			(depth_tile_format << SAVAGE_BD_TILE_SHIFT);
	}

	/* setup status and bci ptr */
	dev_priv->event_counter = 0;
	dev_priv->event_wrap = 0;
	dev_priv->bci_ptr = (volatile uint32_t *)
	    ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
	} else {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
	}
	if (dev_priv->status != NULL) {
		dev_priv->status_ptr =
			(volatile uint32_t *)dev_priv->status->handle;
		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
		dev_priv->status_ptr[1023] = dev_priv->event_counter;
	} else {
		dev_priv->status_ptr = NULL;
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
		} else {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
		}
		dev_priv->wait_evnt = savage_bci_wait_event_reg;
	}

	/* cliprect functions */
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
	else
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

	if (savage_freelist_init(dev) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	if (savage_dma_init(dev_priv) < 0) {
		DRM_ERROR("could not initialize command DMA\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	return 0;
}

static int savage_do_cleanup_bci(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		if (dev_priv->fake_dma.handle)
			drm_free(dev_priv->fake_dma.handle,
				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		drm_core_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_core_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	if (dev_priv->dma_pages)
		drm_free(dev_priv->dma_pages,
			 sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
			 DRM_MEM_DRIVER);

	return 0;
}

static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case SAVAGE_INIT_BCI:
		return savage_do_init_bci(dev, init);
	case SAVAGE_CLEANUP_BCI:
		return savage_do_cleanup_bci(dev);
	}

	return -EINVAL;
}

static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_emit_t *event = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	event->count = savage_bci_emit_event(dev_priv, event->flags);
	event->count |= dev_priv->event_wrap << 16;

	return 0;
}

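/*
 * event->count encodes the wrap counter in the upper 16 bits and the
 * event tag in the lower 16 bits, as returned by the emit ioctl above.
 */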
static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t *event = data;
	unsigned int event_e, hw_e;
	unsigned int event_w, hw_w;

	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--; /* hardware hasn't passed the last wrap yet */

	event_e = event->count & 0xffff;
	event_w = event->count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}

/*
 * DMA buffer management
 */

static int savage_bci_get_buffers(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_dma *d)
{
	struct drm_buf *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = savage_freelist_get(dev);
		if (!buf)
			return -EAGAIN;

		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i],
				     &buf->idx, sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i],
				     &buf->total, sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = savage_bci_get_buffers(dev, file_priv, d);
	}

	return ret;
}

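/*
 * Called when a client exits: age and return any buffers it still holds
 * to the freelist before handing the rest back to the DRM core.
 */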
void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	if (!dma)
		return;
	if (!dev_priv)
		return;
	if (!dma->buflist)
		return;

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_savage_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv &&
		    buf_priv->next == NULL && buf_priv->prev == NULL) {
			uint16_t event;
			DRM_DEBUG("reclaimed from client\n");
			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
			savage_freelist_put(dev, buf);
		}
	}

	drm_core_reclaim_buffers(dev, file_priv);
}

struct drm_ioctl_desc savage_ioctls[] = {
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
be null-terminated strings, so terminate them. */ if (version->name_len) version->name[version->name_len] = '\0'; if (version->date_len) version->date[version->date_len] = '\0'; if (version->desc_len) version->desc[version->desc_len] = '\0'; retval = drmMalloc(sizeof(*retval)); drmCopyVersion(retval, version); drmFreeKernelVersion(version); return retval; } /** * Get version information for the DRM user space library. * * This version number is driver independent. * * \param fd file descriptor. * * \return version information. * * \internal * This function allocates and fills a drm_version structure with a hard coded * version number. */ drmVersionPtr drmGetLibVersion(int fd) { drm_version_t *version = drmMalloc(sizeof(*version)); /* Version history: * NOTE THIS MUST NOT GO ABOVE VERSION 1.X due to drivers needing it * revision 1.0.x = original DRM interface with no drmGetLibVersion * entry point and many drm<Device> extensions * revision 1.1.x = added drmCommand entry points for device extensions * added drmGetLibVersion to identify libdrm.a version * revision 1.2.x = added drmSetInterfaceVersion * modified drmOpen to handle both busid and name * revision 1.3.x = added server + memory manager */ version->version_major = 1; version->version_minor = 3; version->version_patchlevel = 0; return (drmVersionPtr)version; } /** * Free the bus ID information. * * \param busid bus ID information string as given by drmGetBusid(). * * \internal * This function is just frees the memory pointed by \p busid. */ void drmFreeBusid(const char *busid) { drmFree((void *)busid); } /** * Get the bus ID of the device. * * \param fd file descriptor. * * \return bus ID string. * * \internal * This function gets the bus ID via successive DRM_IOCTL_GET_UNIQUE ioctls to * get the string length and data, passing the arguments in a drm_unique * structure. */ char *drmGetBusid(int fd) { drm_unique_t u; u.unique_len = 0; u.unique = NULL; if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL; u.unique = drmMalloc(u.unique_len + 1); if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL; u.unique[u.unique_len] = '\0'; return u.unique; } /** * Set the bus ID of the device. * * \param fd file descriptor. * \param busid bus ID string. * * \return zero on success, negative on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_SET_UNIQUE ioctl, passing * the arguments in a drm_unique structure. */ int drmSetBusid(int fd, const char *busid) { drm_unique_t u; u.unique = (char *)busid; u.unique_len = strlen(busid); if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) { return -errno; } return 0; } int drmGetMagic(int fd, drm_magic_t * magic) { drm_auth_t auth; *magic = 0; if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth)) return -errno; *magic = auth.magic; return 0; } int drmAuthMagic(int fd, drm_magic_t magic) { drm_auth_t auth; auth.magic = magic; if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth)) return -errno; return 0; } /** * Specifies a range of memory that is available for mapping by a * non-root process. * * \param fd file descriptor. * \param offset usually the physical address. The actual meaning depends of * the \p type parameter. See below. * \param size of the memory in bytes. * \param type type of the memory to be mapped. * \param flags combination of several flags to modify the function actions. * \param handle will be set to a value that may be used as the offset * parameter for mmap(). * * \return zero on success or a negative value on error. 
* * \par Mapping the frame buffer * For the frame buffer * - \p offset will be the physical address of the start of the frame buffer, * - \p size will be the size of the frame buffer in bytes, and * - \p type will be DRM_FRAME_BUFFER. * * \par * The area mapped will be uncached. If MTRR support is available in the * kernel, the frame buffer area will be set to write combining. * * \par Mapping the MMIO register area * For the MMIO register area, * - \p offset will be the physical address of the start of the register area, * - \p size will be the size of the register area bytes, and * - \p type will be DRM_REGISTERS. * \par * The area mapped will be uncached. * * \par Mapping the SAREA * For the SAREA, * - \p offset will be ignored and should be set to zero, * - \p size will be the desired size of the SAREA in bytes, * - \p type will be DRM_SHM. * * \par * A shared memory area of the requested size will be created and locked in * kernel memory. This area may be mapped into client-space by using the handle * returned. * * \note May only be called by root. * * \internal * This function is a wrapper around the DRM_IOCTL_ADD_MAP ioctl, passing * the arguments in a drm_map structure. */ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type, drmMapFlags flags, drm_handle_t *handle) { drm_map_t map; map.offset = offset; map.size = size; map.handle = 0; map.type = type; map.flags = flags; if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map)) return -errno; if (handle) *handle = (drm_handle_t)map.handle; return 0; } int drmRmMap(int fd, drm_handle_t handle) { drm_map_t map; map.handle = (void *)handle; if(ioctl(fd, DRM_IOCTL_RM_MAP, &map)) return -errno; return 0; } /** * Make buffers available for DMA transfers. * * \param fd file descriptor. * \param count number of buffers. * \param size size of each buffer. * \param flags buffer allocation flags. * \param agp_offset offset in the AGP aperture * * \return number of buffers allocated, negative on error. * * \internal * This function is a wrapper around DRM_IOCTL_ADD_BUFS ioctl. * * \sa drm_buf_desc. */ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags, int agp_offset) { drm_buf_desc_t request; request.count = count; request.size = size; request.low_mark = 0; request.high_mark = 0; request.flags = flags; request.agp_start = agp_offset; if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request)) return -errno; return request.count; } int drmMarkBufs(int fd, double low, double high) { drm_buf_info_t info; int i; info.count = 0; info.list = NULL; if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) return -EINVAL; if (!info.count) return -EINVAL; if (!(info.list = drmMalloc(info.count * sizeof(*info.list)))) return -ENOMEM; if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { int retval = -errno; drmFree(info.list); return retval; } for (i = 0; i < info.count; i++) { info.list[i].low_mark = low * info.list[i].count; info.list[i].high_mark = high * info.list[i].count; if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) { int retval = -errno; drmFree(info.list); return retval; } } drmFree(info.list); return 0; } /** * Free buffers. * * \param fd file descriptor. * \param count number of buffers to free. * \param list list of buffers to be freed. * * \return zero on success, or a negative value on failure. * * \note This function is primarily used for debugging. * * \internal * This function is a wrapper around the DRM_IOCTL_FREE_BUFS ioctl, passing * the arguments in a drm_buf_free structure. 
*/ int drmFreeBufs(int fd, int count, int *list) { drm_buf_free_t request; request.count = count; request.list = list; if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request)) return -errno; return 0; } /** * Close the device. * * \param fd file descriptor. * * \internal * This function closes the file descriptor. */ int drmClose(int fd) { unsigned long key = drmGetKeyFromFd(fd); drmHashEntry *entry = drmGetEntry(fd); drmHashDestroy(entry->tagTable); entry->fd = 0; entry->f = NULL; entry->tagTable = NULL; drmHashDelete(drmHashTable, key); drmFree(entry); return close(fd); } /** * Map a region of memory. * * \param fd file descriptor. * \param handle handle returned by drmAddMap(). * \param size size in bytes. Must match the size used by drmAddMap(). * \param address will contain the user-space virtual address where the mapping * begins. * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper for mmap(). */ int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address) { static unsigned long pagesize_mask = 0; if (fd < 0) return -EINVAL; if (!pagesize_mask) pagesize_mask = getpagesize() - 1; size = (size + pagesize_mask) & ~pagesize_mask; *address = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle); if (*address == MAP_FAILED) return -errno; return 0; } /** * Unmap mappings obtained with drmMap(). * * \param address address as given by drmMap(). * \param size size in bytes. Must match the size used by drmMap(). * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper for munmap(). */ int drmUnmap(drmAddress address, drmSize size) { return munmap(address, size); } drmBufInfoPtr drmGetBufInfo(int fd) { drm_buf_info_t info; drmBufInfoPtr retval; int i; info.count = 0; info.list = NULL; if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) return NULL; if (info.count) { if (!(info.list = drmMalloc(info.count * sizeof(*info.list)))) return NULL; if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { drmFree(info.list); return NULL; } retval = drmMalloc(sizeof(*retval)); retval->count = info.count; retval->list = drmMalloc(info.count * sizeof(*retval->list)); for (i = 0; i < info.count; i++) { retval->list[i].count = info.list[i].count; retval->list[i].size = info.list[i].size; retval->list[i].low_mark = info.list[i].low_mark; retval->list[i].high_mark = info.list[i].high_mark; } drmFree(info.list); return retval; } return NULL; } /** * Map all DMA buffers into client-virtual space. * * \param fd file descriptor. * * \return a pointer to a ::drmBufMap structure. * * \note The client may not use these buffers until obtaining buffer indices * with drmDMA(). * * \internal * This function calls the DRM_IOCTL_MAP_BUFS ioctl and copies the returned * information about the buffers in a drm_buf_map structure into the * client-visible data structures. 
*/ drmBufMapPtr drmMapBufs(int fd) { drm_buf_map_t bufs; drmBufMapPtr retval; int i; bufs.count = 0; bufs.list = NULL; bufs.virtual = NULL; if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) return NULL; if (!bufs.count) return NULL; if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list)))) return NULL; if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) { drmFree(bufs.list); return NULL; } retval = drmMalloc(sizeof(*retval)); retval->count = bufs.count; retval->list = drmMalloc(bufs.count * sizeof(*retval->list)); for (i = 0; i < bufs.count; i++) { retval->list[i].idx = bufs.list[i].idx; retval->list[i].total = bufs.list[i].total; retval->list[i].used = 0; retval->list[i].address = bufs.list[i].address; } drmFree(bufs.list); return retval; } /** * Unmap buffers allocated with drmMapBufs(). * * \return zero on success, or negative value on failure. * * \internal * Calls munmap() for every buffer stored in \p bufs and frees the * memory allocated by drmMapBufs(). */ int drmUnmapBufs(drmBufMapPtr bufs) { int i; for (i = 0; i < bufs->count; i++) { munmap(bufs->list[i].address, bufs->list[i].total); } drmFree(bufs->list); drmFree(bufs); return 0; } #define DRM_DMA_RETRY 16 /** * Reserve DMA buffers. * * \param fd file descriptor. * \param request * * \return zero on success, or a negative value on failure. * * \internal * Assemble the arguments into a drm_dma structure and keeps issuing the * DRM_IOCTL_DMA ioctl until success or until maximum number of retries. */ int drmDMA(int fd, drmDMAReqPtr request) { drm_dma_t dma; int ret, i = 0; dma.context = request->context; dma.send_count = request->send_count; dma.send_indices = request->send_list; dma.send_sizes = request->send_sizes; dma.flags = request->flags; dma.request_count = request->request_count; dma.request_size = request->request_size; dma.request_indices = request->request_list; dma.request_sizes = request->request_sizes; dma.granted_count = 0; do { ret = ioctl( fd, DRM_IOCTL_DMA, &dma ); } while ( ret && errno == EAGAIN && i++ < DRM_DMA_RETRY ); if ( ret == 0 ) { request->granted_count = dma.granted_count; return 0; } else { return -errno; } } /** * Obtain heavyweight hardware lock. * * \param fd file descriptor. * \param context context. * \param flags flags that determine the sate of the hardware when the function * returns. * * \return always zero. * * \internal * This function translates the arguments into a drm_lock structure and issue * the DRM_IOCTL_LOCK ioctl until the lock is successfully acquired. */ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags) { drm_lock_t lock; lock.context = context; lock.flags = 0; if (flags & DRM_LOCK_READY) lock.flags |= _DRM_LOCK_READY; if (flags & DRM_LOCK_QUIESCENT) lock.flags |= _DRM_LOCK_QUIESCENT; if (flags & DRM_LOCK_FLUSH) lock.flags |= _DRM_LOCK_FLUSH; if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL; if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES; if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES; while (ioctl(fd, DRM_IOCTL_LOCK, &lock)) ; return 0; } /** * Release the hardware lock. * * \param fd file descriptor. * \param context context. * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_UNLOCK ioctl, passing the * argument in a drm_lock structure. 
*/ int drmUnlock(int fd, drm_context_t context) { drm_lock_t lock; lock.context = context; lock.flags = 0; return ioctl(fd, DRM_IOCTL_UNLOCK, &lock); } drm_context_t *drmGetReservedContextList(int fd, int *count) { drm_ctx_res_t res; drm_ctx_t *list; drm_context_t * retval; int i; res.count = 0; res.contexts = NULL; if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) return NULL; if (!res.count) return NULL; if (!(list = drmMalloc(res.count * sizeof(*list)))) return NULL; if (!(retval = drmMalloc(res.count * sizeof(*retval)))) { drmFree(list); return NULL; } res.contexts = list; if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) return NULL; for (i = 0; i < res.count; i++) retval[i] = list[i].handle; drmFree(list); *count = res.count; return retval; } void drmFreeReservedContextList(drm_context_t *pt) { drmFree(pt); } /** * Create context. * * Used by the X server during GLXContext initialization. This causes * per-context kernel-level resources to be allocated. * * \param fd file descriptor. * \param handle is set on success. To be used by the client when requesting DMA * dispatch with drmDMA(). * * \return zero on success, or a negative value on failure. * * \note May only be called by root. * * \internal * This function is a wrapper around the DRM_IOCTL_ADD_CTX ioctl, passing the * argument in a drm_ctx structure. */ int drmCreateContext(int fd, drm_context_t *handle) { drm_ctx_t ctx; ctx.flags = 0; /* Modified with functions below */ if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx)) return -errno; *handle = ctx.handle; return 0; } int drmSwitchToContext(int fd, drm_context_t context) { drm_ctx_t ctx; ctx.handle = context; if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx)) return -errno; return 0; } int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags) { drm_ctx_t ctx; /* * Context preserving means that no context switches are done between DMA * buffers from one context and the next. This is suitable for use in the * X server (which promises to maintain hardware context), or in the * client-side library when buffers are swapped on behalf of two threads. */ ctx.handle = context; ctx.flags = 0; if (flags & DRM_CONTEXT_PRESERVED) ctx.flags |= _DRM_CONTEXT_PRESERVED; if (flags & DRM_CONTEXT_2DONLY) ctx.flags |= _DRM_CONTEXT_2DONLY; if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx)) return -errno; return 0; } int drmGetContextFlags(int fd, drm_context_t context, drm_context_tFlagsPtr flags) { drm_ctx_t ctx; ctx.handle = context; if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx)) return -errno; *flags = 0; if (ctx.flags & _DRM_CONTEXT_PRESERVED) *flags |= DRM_CONTEXT_PRESERVED; if (ctx.flags & _DRM_CONTEXT_2DONLY) *flags |= DRM_CONTEXT_2DONLY; return 0; } /** * Destroy context. * * Free any kernel-level resources allocated with drmCreateContext() associated * with the context. * * \param fd file descriptor. * \param handle handle given by drmCreateContext(). * * \return zero on success, or a negative value on failure. * * \note May only be called by root. * * \internal * This function is a wrapper around the DRM_IOCTL_RM_CTX ioctl, passing the * argument in a drm_ctx structure. 
*/ int drmDestroyContext(int fd, drm_context_t handle) { drm_ctx_t ctx; ctx.handle = handle; if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx)) return -errno; return 0; } int drmCreateDrawable(int fd, drm_drawable_t *handle) { drm_draw_t draw; if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw)) return -errno; *handle = draw.handle; return 0; } int drmDestroyDrawable(int fd, drm_drawable_t handle) { drm_draw_t draw; draw.handle = handle; if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw)) return -errno; return 0; } int drmUpdateDrawableInfo(int fd, drm_drawable_t handle, drm_drawable_info_type_t type, unsigned int num, void *data) { drm_update_draw_t update; update.handle = handle; update.type = type; update.num = num; update.data = (unsigned long long)(unsigned long)data; if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update)) return -errno; return 0; } /** * Acquire the AGP device. * * Must be called before any of the other AGP related calls. * * \param fd file descriptor. * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_ACQUIRE ioctl. */ int drmAgpAcquire(int fd) { if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL)) return -errno; return 0; } /** * Release the AGP device. * * \param fd file descriptor. * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_RELEASE ioctl. */ int drmAgpRelease(int fd) { if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL)) return -errno; return 0; } /** * Set the AGP mode. * * \param fd file descriptor. * \param mode AGP mode. * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_ENABLE ioctl, passing the * argument in a drm_agp_mode structure. */ int drmAgpEnable(int fd, unsigned long mode) { drm_agp_mode_t m; m.mode = mode; if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m)) return -errno; return 0; } /** * Allocate a chunk of AGP memory. * * \param fd file descriptor. * \param size requested memory size in bytes. Will be rounded to page boundary. * \param type type of memory to allocate. * \param address if not zero, will be set to the physical address of the * allocated memory. * \param handle on success will be set to a handle of the allocated memory. * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_ALLOC ioctl, passing the * arguments in a drm_agp_buffer structure. */ int drmAgpAlloc(int fd, unsigned long size, unsigned long type, unsigned long *address, drm_handle_t *handle) { drm_agp_buffer_t b; *handle = DRM_AGP_NO_HANDLE; b.size = size; b.handle = 0; b.type = type; if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b)) return -errno; if (address != 0UL) *address = b.physical; *handle = b.handle; return 0; } /** * Free a chunk of AGP memory. * * \param fd file descriptor. * \param handle handle to the allocated memory, as given by drmAgpAllocate(). * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_FREE ioctl, passing the * argument in a drm_agp_buffer structure. */ int drmAgpFree(int fd, drm_handle_t handle) { drm_agp_buffer_t b; b.size = 0; b.handle = handle; if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b)) return -errno; return 0; } /** * Bind a chunk of AGP memory. * * \param fd file descriptor. * \param handle handle to the allocated memory, as given by drmAgpAllocate(). * \param offset offset in bytes. It will round to page boundary. 
* * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_BIND ioctl, passing the * argument in a drm_agp_binding structure. */ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset) { drm_agp_binding_t b; b.handle = handle; b.offset = offset; if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b)) return -errno; return 0; } /** * Unbind a chunk of AGP memory. * * \param fd file descriptor. * \param handle handle to the allocated memory, as given by drmAgpAllocate(). * * \return zero on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_UNBIND ioctl, passing * the argument in a drm_agp_binding structure. */ int drmAgpUnbind(int fd, drm_handle_t handle) { drm_agp_binding_t b; b.handle = handle; b.offset = 0; if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b)) return -errno; return 0; } /** * Get AGP driver major version number. * * \param fd file descriptor. * * \return major version number on success, or a negative value on failure.. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the * necessary information in a drm_agp_info structure. */ int drmAgpVersionMajor(int fd) { drm_agp_info_t i; if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno; return i.agp_version_major; } /** * Get AGP driver minor version number. * * \param fd file descriptor. * * \return minor version number on success, or a negative value on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the * necessary information in a drm_agp_info structure. */ int drmAgpVersionMinor(int fd) { drm_agp_info_t i; if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno; return i.agp_version_minor; } /** * Get AGP mode. * * \param fd file descriptor. * * \return mode on success, or zero on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the * necessary information in a drm_agp_info structure. */ unsigned long drmAgpGetMode(int fd) { drm_agp_info_t i; if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.mode; } /** * Get AGP aperture base. * * \param fd file descriptor. * * \return aperture base on success, zero on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the * necessary information in a drm_agp_info structure. */ unsigned long drmAgpBase(int fd) { drm_agp_info_t i; if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.aperture_base; } /** * Get AGP aperture size. * * \param fd file descriptor. * * \return aperture size on success, zero on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the * necessary information in a drm_agp_info structure. */ unsigned long drmAgpSize(int fd) { drm_agp_info_t i; if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.aperture_size; } /** * Get used AGP memory. * * \param fd file descriptor. * * \return memory used on success, or zero on failure. * * \internal * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the * necessary information in a drm_agp_info structure. */ unsigned long drmAgpMemoryUsed(int fd) { drm_agp_info_t i; if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0; return i.memory_used; } /** * Get available AGP memory. * * \param fd file descriptor. * * \return memory available on success, or zero on failure. 
/**
 * Get available AGP memory.
 *
 * \param fd file descriptor.
 *
 * \return memory available on success, or zero on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
 * necessary information in a drm_agp_info structure.
 */
unsigned long drmAgpMemoryAvail(int fd)
{
    drm_agp_info_t i;

    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
    return i.memory_allowed;
}

/**
 * Get hardware vendor ID.
 *
 * \param fd file descriptor.
 *
 * \return vendor ID on success, or zero on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
 * necessary information in a drm_agp_info structure.
 */
unsigned int drmAgpVendorId(int fd)
{
    drm_agp_info_t i;

    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
    return i.id_vendor;
}

/**
 * Get hardware device ID.
 *
 * \param fd file descriptor.
 *
 * \return device ID on success, or zero on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
 * necessary information in a drm_agp_info structure.
 */
unsigned int drmAgpDeviceId(int fd)
{
    drm_agp_info_t i;

    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
    return i.id_device;
}

int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
{
    drm_scatter_gather_t sg;

    *handle = 0;
    sg.size = size;
    sg.handle = 0;
    if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
        return -errno;
    *handle = sg.handle;
    return 0;
}

int drmScatterGatherFree(int fd, drm_handle_t handle)
{
    drm_scatter_gather_t sg;

    sg.size = 0;
    sg.handle = handle;
    if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
        return -errno;
    return 0;
}

/**
 * Wait for VBLANK.
 *
 * \param fd file descriptor.
 * \param vbl pointer to a drmVBlank structure.
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_WAIT_VBLANK ioctl.
 */
int drmWaitVBlank(int fd, drmVBlankPtr vbl)
{
    int ret;

    do {
        ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
        vbl->request.type &= ~DRM_VBLANK_RELATIVE;
    } while (ret && errno == EINTR);

    return ret;
}
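/*
 * Editor's note: drmWaitVBlank() retries automatically on EINTR, clearing
 * the DRM_VBLANK_RELATIVE bit after the first attempt so that an
 * interrupted relative wait is not extended again on retry.  A minimal
 * sketch of waiting for the next vertical blank (illustrative only):
 *
 * \code
 * drmVBlank vbl;
 *
 * vbl.request.type = DRM_VBLANK_RELATIVE;
 * vbl.request.sequence = 1;
 * if (drmWaitVBlank(fd, &vbl))
 *     return -1;
 * \endcode
 */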
int drmError(int err, const char *label)
{
    switch (err) {
    case DRM_ERR_NO_DEVICE:
        fprintf(stderr, "%s: no device\n", label);
        break;
    case DRM_ERR_NO_ACCESS:
        fprintf(stderr, "%s: no access\n", label);
        break;
    case DRM_ERR_NOT_ROOT:
        fprintf(stderr, "%s: not root\n", label);
        break;
    case DRM_ERR_INVALID:
        fprintf(stderr, "%s: invalid args\n", label);
        break;
    default:
        if (err < 0)
            err = -err;
        fprintf(stderr, "%s: error %d (%s)\n", label, err, strerror(err));
        break;
    }

    return 1;
}

/**
 * Install IRQ handler.
 *
 * \param fd file descriptor.
 * \param irq IRQ number.
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the
 * argument in a drm_control structure.
 */
int drmCtlInstHandler(int fd, int irq)
{
    drm_control_t ctl;

    ctl.func = DRM_INST_HANDLER;
    ctl.irq = irq;
    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
        return -errno;
    return 0;
}

/**
 * Uninstall IRQ handler.
 *
 * \param fd file descriptor.
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the
 * argument in a drm_control structure.
 */
int drmCtlUninstHandler(int fd)
{
    drm_control_t ctl;

    ctl.func = DRM_UNINST_HANDLER;
    ctl.irq = 0;
    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
        return -errno;
    return 0;
}

int drmFinish(int fd, int context, drmLockFlags flags)
{
    drm_lock_t lock;

    lock.context = context;
    lock.flags = 0;
    if (flags & DRM_LOCK_READY)      lock.flags |= _DRM_LOCK_READY;
    if (flags & DRM_LOCK_QUIESCENT)  lock.flags |= _DRM_LOCK_QUIESCENT;
    if (flags & DRM_LOCK_FLUSH)      lock.flags |= _DRM_LOCK_FLUSH;
    if (flags & DRM_LOCK_FLUSH_ALL)  lock.flags |= _DRM_LOCK_FLUSH_ALL;
    if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
    if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
    if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
        return -errno;
    return 0;
}

/**
 * Get IRQ from bus ID.
 *
 * \param fd file descriptor.
 * \param busnum bus number.
 * \param devnum device number.
 * \param funcnum function number.
 *
 * \return IRQ number on success, or a negative value on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_IRQ_BUSID ioctl, passing the
 * arguments in a drm_irq_busid structure.
 */
int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
{
    drm_irq_busid_t p;

    p.busnum = busnum;
    p.devnum = devnum;
    p.funcnum = funcnum;
    if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
        return -errno;
    return p.irq;
}

int drmAddContextTag(int fd, drm_context_t context, void *tag)
{
    drmHashEntry *entry = drmGetEntry(fd);

    if (drmHashInsert(entry->tagTable, context, tag)) {
        drmHashDelete(entry->tagTable, context);
        drmHashInsert(entry->tagTable, context, tag);
    }
    return 0;
}

int drmDelContextTag(int fd, drm_context_t context)
{
    drmHashEntry *entry = drmGetEntry(fd);

    return drmHashDelete(entry->tagTable, context);
}

void *drmGetContextTag(int fd, drm_context_t context)
{
    drmHashEntry *entry = drmGetEntry(fd);
    void *value;

    if (drmHashLookup(entry->tagTable, context, &value))
        return NULL;

    return value;
}

int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
                                drm_handle_t handle)
{
    drm_ctx_priv_map_t map;

    map.ctx_id = ctx_id;
    map.handle = (void *)handle;

    if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
        return -errno;
    return 0;
}

int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
                                drm_handle_t *handle)
{
    drm_ctx_priv_map_t map;

    map.ctx_id = ctx_id;

    if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
        return -errno;
    if (handle)
        *handle = (drm_handle_t)map.handle;

    return 0;
}

int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
              drmMapType *type, drmMapFlags *flags, drm_handle_t *handle,
              int *mtrr)
{
    drm_map_t map;

    map.offset = idx;
    if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
        return -errno;
    *offset = map.offset;
    *size   = map.size;
    *type   = map.type;
    *flags  = map.flags;
    *handle = (unsigned long)map.handle;
    *mtrr   = map.mtrr;
    return 0;
}

int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
                 unsigned long *magic, unsigned long *iocs)
{
    drm_client_t client;

    client.idx = idx;
    if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
        return -errno;
    *auth  = client.auth;
    *pid   = client.pid;
    *uid   = client.uid;
    *magic = client.magic;
    *iocs  = client.iocs;
    return 0;
}
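/*
 * Editor's note: drmGetMap() and drmGetClient() are index-based iterators;
 * callers probe increasing indices until the ioctl fails.  A sketch that
 * counts the authenticated clients of a device (illustrative only):
 *
 * \code
 * int idx, auth, pid, uid, nauth = 0;
 * unsigned long magic, iocs;
 *
 * for (idx = 0; !drmGetClient(fd, idx, &auth, &pid, &uid,
 *                             &magic, &iocs); idx++) {
 *     if (auth)
 *         nauth++;
 * }
 * \endcode
 */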
int drmGetStats(int fd, drmStatsT *stats)
{
    drm_stats_t s;
    int i;

    if (ioctl(fd, DRM_IOCTL_GET_STATS, &s))
        return -errno;

    memset(stats, 0, sizeof(*stats));
    if (s.count > sizeof(stats->data)/sizeof(stats->data[0]))
        return -1;

#define SET_VALUE                            \
    stats->data[i].long_format = "%-20.20s"; \
    stats->data[i].rate_format = "%8.8s";    \
    stats->data[i].isvalue     = 1;          \
    stats->data[i].verbose     = 0

#define SET_COUNT                            \
    stats->data[i].long_format = "%-20.20s"; \
    stats->data[i].rate_format = "%5.5s";    \
    stats->data[i].isvalue     = 0;          \
    stats->data[i].mult_names  = "kgm";      \
    stats->data[i].mult        = 1000;       \
    stats->data[i].verbose     = 0

#define SET_BYTE                             \
    stats->data[i].long_format = "%-20.20s"; \
    stats->data[i].rate_format = "%5.5s";    \
    stats->data[i].isvalue     = 0;          \
    stats->data[i].mult_names  = "KGM";      \
    stats->data[i].mult        = 1024;       \
    stats->data[i].verbose     = 0

    stats->count = s.count;
    for (i = 0; i < s.count; i++) {
        stats->data[i].value = s.data[i].value;
        switch (s.data[i].type) {
        case _DRM_STAT_LOCK:
            stats->data[i].long_name = "Lock";
            stats->data[i].rate_name = "Lock";
            SET_VALUE;
            break;
        case _DRM_STAT_OPENS:
            stats->data[i].long_name = "Opens";
            stats->data[i].rate_name = "O";
            SET_COUNT;
            stats->data[i].verbose = 1;
            break;
        case _DRM_STAT_CLOSES:
            stats->data[i].long_name = "Closes";
            stats->data[i].rate_name = "C";
            SET_COUNT;
            stats->data[i].verbose = 1;
            break;
        case _DRM_STAT_IOCTLS:
            stats->data[i].long_name = "Ioctls";
            stats->data[i].rate_name = "Ioc/s";
            SET_COUNT;
            break;
        case _DRM_STAT_LOCKS:
            stats->data[i].long_name = "Locks";
            stats->data[i].rate_name = "Lck/s";
            SET_COUNT;
            break;
        case _DRM_STAT_UNLOCKS:
            stats->data[i].long_name = "Unlocks";
            stats->data[i].rate_name = "Unl/s";
            SET_COUNT;
            break;
        case _DRM_STAT_IRQ:
            stats->data[i].long_name = "IRQs";
            stats->data[i].rate_name = "IRQ/s";
            SET_COUNT;
            break;
        case _DRM_STAT_PRIMARY:
            stats->data[i].long_name = "Primary Bytes";
            stats->data[i].rate_name = "PB/s";
            SET_BYTE;
            break;
        case _DRM_STAT_SECONDARY:
            stats->data[i].long_name = "Secondary Bytes";
            stats->data[i].rate_name = "SB/s";
            SET_BYTE;
            break;
        case _DRM_STAT_DMA:
            stats->data[i].long_name = "DMA";
            stats->data[i].rate_name = "DMA/s";
            SET_COUNT;
            break;
        case _DRM_STAT_SPECIAL:
            stats->data[i].long_name = "Special DMA";
            stats->data[i].rate_name = "dma/s";
            SET_COUNT;
            break;
        case _DRM_STAT_MISSED:
            stats->data[i].long_name = "Miss";
            stats->data[i].rate_name = "Ms/s";
            SET_COUNT;
            break;
        case _DRM_STAT_VALUE:
            stats->data[i].long_name = "Value";
            stats->data[i].rate_name = "Value";
            SET_VALUE;
            break;
        case _DRM_STAT_BYTE:
            stats->data[i].long_name = "Bytes";
            stats->data[i].rate_name = "B/s";
            SET_BYTE;
            break;
        case _DRM_STAT_COUNT:
        default:
            stats->data[i].long_name = "Count";
            stats->data[i].rate_name = "Cnt/s";
            SET_COUNT;
            break;
        }
    }

    return 0;
}

/**
 * Issue a set-version ioctl.
 *
 * \param fd file descriptor.
 * \param version pointer to a drmSetVersion structure holding the desired
 * interface version numbers; on return it holds the version numbers
 * actually negotiated with the kernel.
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * This function is a wrapper around the DRM_IOCTL_SET_VERSION ioctl, passing
 * the arguments in a drm_set_version structure.
 */
int drmSetInterfaceVersion(int fd, drmSetVersion *version)
{
    int retcode = 0;
    drm_set_version_t sv;

    sv.drm_di_major = version->drm_di_major;
    sv.drm_di_minor = version->drm_di_minor;
    sv.drm_dd_major = version->drm_dd_major;
    sv.drm_dd_minor = version->drm_dd_minor;

    if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
        retcode = -errno;
    }

    version->drm_di_major = sv.drm_di_major;
    version->drm_di_minor = sv.drm_di_minor;
    version->drm_dd_major = sv.drm_dd_major;
    version->drm_dd_minor = sv.drm_dd_minor;

    return retcode;
}
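/*
 * Editor's note: callers pass the interface version they want and read
 * back what the kernel actually accepted; a field set to -1 is treated as
 * "don't care".  A typical negotiation sketch (the 1.1 core version is an
 * example value, illustrative only):
 *
 * \code
 * drmSetVersion sv;
 *
 * sv.drm_di_major = 1;
 * sv.drm_di_minor = 1;
 * sv.drm_dd_major = -1;
 * sv.drm_dd_minor = -1;
 * if (drmSetInterfaceVersion(fd, &sv))
 *     return -1;
 * \endcode
 */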
/**
 * Send a device-specific command.
 *
 * \param fd file descriptor.
 * \param drmCommandIndex command index
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * It issues an ioctl given by
 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
 */
int drmCommandNone(int fd, unsigned long drmCommandIndex)
{
    void *data = NULL; /* dummy */
    unsigned long request;

    request = DRM_IO(DRM_COMMAND_BASE + drmCommandIndex);

    if (ioctl(fd, request, data)) {
        return -errno;
    }
    return 0;
}

/**
 * Send a device-specific read command.
 *
 * \param fd file descriptor.
 * \param drmCommandIndex command index
 * \param data destination pointer of the data to be read.
 * \param size size of the data to be read.
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * It issues a read ioctl given by
 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
 */
int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
                   unsigned long size)
{
    unsigned long request;

    request = DRM_IOC(DRM_IOC_READ, DRM_IOCTL_BASE,
                      DRM_COMMAND_BASE + drmCommandIndex, size);

    if (ioctl(fd, request, data)) {
        return -errno;
    }
    return 0;
}

/**
 * Send a device-specific write command.
 *
 * \param fd file descriptor.
 * \param drmCommandIndex command index
 * \param data source pointer of the data to be written.
 * \param size size of the data to be written.
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * It issues a write ioctl given by
 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
 */
int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
                    unsigned long size)
{
    unsigned long request;

    request = DRM_IOC(DRM_IOC_WRITE, DRM_IOCTL_BASE,
                      DRM_COMMAND_BASE + drmCommandIndex, size);

    if (ioctl(fd, request, data)) {
        return -errno;
    }
    return 0;
}
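/*
 * Editor's note: device-specific libraries build their ioctl numbers from
 * DRM_COMMAND_BASE plus a driver-defined index.  A sketch of how a driver
 * library might wrap a get-parameter command with drmCommandWriteRead()
 * (defined below); MYDRV_GETPARAM and the structure are hypothetical,
 * for illustration only:
 *
 * \code
 * typedef struct {
 *     int param;
 *     int value;
 * } myDrvGetParam;
 *
 * myDrvGetParam gp = { 1, 0 };
 *
 * if (drmCommandWriteRead(fd, MYDRV_GETPARAM, &gp, sizeof(gp)))
 *     return -1;
 * \endcode
 */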
/**
 * Send a device-specific read-write command.
 *
 * \param fd file descriptor.
 * \param drmCommandIndex command index
 * \param data source pointer of the data to be read and written.
 * \param size size of the data to be read and written.
 *
 * \return zero on success, or a negative value on failure.
 *
 * \internal
 * It issues a read-write ioctl given by
 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
 */
int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
                        unsigned long size)
{
    unsigned long request;

    request = DRM_IOC(DRM_IOC_READ | DRM_IOC_WRITE, DRM_IOCTL_BASE,
                      DRM_COMMAND_BASE + drmCommandIndex, size);

    if (ioctl(fd, request, data)) {
        return -errno;
    }
    return 0;
}

/*
 * Valid flags are
 * DRM_FENCE_FLAG_EMIT
 * DRM_FENCE_FLAG_SHAREABLE
 * DRM_FENCE_MASK_DRIVER
 */
int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
                   drmFence *fence)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.flags = flags;
    arg.type = type;
    arg.fence_class = fence_class;

    if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
        return -errno;
    fence->handle = arg.handle;
    fence->fence_class = arg.fence_class;
    fence->type = arg.type;
    fence->flags = arg.flags;
    fence->signaled = 0;
    return 0;
}

/*
 * Valid flags are
 * DRM_FENCE_FLAG_SHAREABLE
 * DRM_FENCE_MASK_DRIVER
 */
int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class,
                    drmFence *fence)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.flags = flags;
    arg.fence_class = fence_class;

    if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
        return -errno;
    fence->handle = arg.handle;
    fence->fence_class = arg.fence_class;
    fence->type = arg.type;
    fence->flags = arg.flags;
    fence->sequence = arg.sequence;
    fence->signaled = 0;
    return 0;
}

int drmFenceReference(int fd, unsigned handle, drmFence *fence)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = handle;

    if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
        return -errno;
    fence->handle = arg.handle;
    fence->fence_class = arg.fence_class;
    fence->type = arg.type;
    fence->flags = arg.flags;
    fence->signaled = arg.signaled;
    return 0;
}

int drmFenceUnreference(int fd, const drmFence *fence)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = fence->handle;

    if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
        return -errno;
    return 0;
}

int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = fence->handle;
    arg.type = flush_type;

    if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
        return -errno;
    fence->fence_class = arg.fence_class;
    fence->type = arg.type;
    fence->signaled = arg.signaled;
    return arg.error;
}

int drmFenceUpdate(int fd, drmFence *fence)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = fence->handle;

    if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
        return -errno;
    fence->fence_class = arg.fence_class;
    fence->type = arg.type;
    fence->signaled = arg.signaled;
    return 0;
}

int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType,
                     int *signaled)
{
    if ((fence->flags & DRM_FENCE_FLAG_SHAREABLE) ||
        ((fenceType & fence->signaled) != fenceType)) {
        int ret = drmFenceFlush(fd, fence, fenceType);
        if (ret)
            return ret;
    }

    *signaled = ((fenceType & fence->signaled) == fenceType);

    return 0;
}

/*
 * Valid flags are
 * DRM_FENCE_FLAG_SHAREABLE
 * DRM_FENCE_MASK_DRIVER
 */
int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
{
    drm_fence_arg_t arg;

    memset(&arg, 0, sizeof(arg));
    arg.fence_class = fence->fence_class;
    arg.flags = flags;
    arg.handle = fence->handle;
    arg.type = emit_type;

    if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
        return -errno;
    fence->fence_class = arg.fence_class;
    fence->type = arg.type;
    fence->signaled = arg.signaled;
    fence->sequence = arg.sequence;
    return 0;
}
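/*
 * Editor's note: a typical fence lifetime is create-and-emit a fence
 * behind the commands to be synchronized, wait for it with drmFenceWait()
 * (defined below), then drop the reference.  A minimal sketch; fence
 * class 0 and a driver-defined type of 1 are example values:
 *
 * \code
 * drmFence fence;
 *
 * if (drmFenceCreate(fd, DRM_FENCE_FLAG_EMIT, 0, 1, &fence))
 *     return -1;
 * if (drmFenceWait(fd, 0, &fence, fence.type))
 *     return -1;
 * drmFenceUnreference(fd, &fence);
 * \endcode
 */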
/*
 * Valid flags are
 * DRM_FENCE_FLAG_WAIT_LAZY
 * DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS
 */

#define DRM_IOCTL_TIMEOUT_USEC 3000000UL

static unsigned long drmTimeDiff(struct timeval *now, struct timeval *then)
{
    uint64_t val;

    val = now->tv_sec - then->tv_sec;
    val *= 1000000LL;
    val += now->tv_usec;
    val -= then->tv_usec;

    return (unsigned long)val;
}

static int drmIoctlTimeout(int fd, unsigned long request, void *argp)
{
    int haveThen = 0;
    struct timeval then, now;
    int ret;

    do {
        ret = ioctl(fd, request, argp);
        if (ret != 0 && errno == EAGAIN) {
            if (!haveThen) {
                gettimeofday(&then, NULL);
                haveThen = 1;
            }
            gettimeofday(&now, NULL);
        }
    } while (ret != 0 && errno == EAGAIN &&
             drmTimeDiff(&now, &then) < DRM_IOCTL_TIMEOUT_USEC);

    if (ret != 0)
        return ((errno == EAGAIN) ? -EBUSY : -errno);

    return 0;
}

int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
{
    drm_fence_arg_t arg;
    int ret;

    if (flush_type == 0) {
        flush_type = fence->type;
    }

    if (!(fence->flags & DRM_FENCE_FLAG_SHAREABLE)) {
        if ((flush_type & fence->signaled) == flush_type) {
            return 0;
        }
    }

    memset(&arg, 0, sizeof(arg));
    arg.handle = fence->handle;
    arg.type = flush_type;
    arg.flags = flags;

    ret = drmIoctlTimeout(fd, DRM_IOCTL_FENCE_WAIT, &arg);
    if (ret)
        return ret;

    fence->fence_class = arg.fence_class;
    fence->type = arg.type;
    fence->signaled = arg.signaled;
    return arg.error;
}

static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
{
    buf->handle = rep->handle;
    buf->flags = rep->flags;
    buf->size = rep->size;
    buf->offset = rep->offset;
    buf->mapHandle = rep->arg_handle;
    buf->mask = rep->mask;
    buf->start = rep->buffer_start;
    buf->fenceFlags = rep->fence_flags;
    buf->replyFlags = rep->rep_flags;
    buf->pageAlignment = rep->page_alignment;
    buf->tileInfo = rep->tile_info;
    buf->hwTileStride = rep->hw_tile_stride;
    buf->desiredTileStride = rep->desired_tile_stride;
}

int drmBOCreate(int fd, unsigned long size, unsigned pageAlignment,
                void *user_buffer, uint64_t mask, unsigned hint, drmBO *buf)
{
    struct drm_bo_create_arg arg;
    struct drm_bo_create_req *req = &arg.d.req;
    struct drm_bo_info_rep *rep = &arg.d.rep;
    int ret;

    memset(buf, 0, sizeof(*buf));
    memset(&arg, 0, sizeof(arg));
    req->mask = mask;
    req->hint = hint;
    req->size = size;
    req->page_alignment = pageAlignment;
    req->buffer_start = (unsigned long)user_buffer;

    buf->virtual = NULL;

    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_CREATE, &arg);
    if (ret)
        return ret;

    drmBOCopyReply(rep, buf);
    buf->virtual = user_buffer;
    buf->mapCount = 0;

    return 0;
}

int drmBOReference(int fd, unsigned handle, drmBO *buf)
{
    struct drm_bo_reference_info_arg arg;
    struct drm_bo_handle_arg *req = &arg.d.req;
    struct drm_bo_info_rep *rep = &arg.d.rep;

    memset(&arg, 0, sizeof(arg));
    req->handle = handle;

    if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
        return -errno;

    drmBOCopyReply(rep, buf);
    buf->mapVirtual = NULL;
    buf->mapCount = 0;
    buf->virtual = NULL;

    return 0;
}

int drmBOUnreference(int fd, drmBO *buf)
{
    struct drm_bo_handle_arg arg;

    if (buf->mapVirtual && buf->mapHandle) {
        (void)munmap(buf->mapVirtual, buf->start + buf->size);
        buf->mapVirtual = NULL;
        buf->virtual = NULL;
    }

    memset(&arg, 0, sizeof(arg));
    arg.handle = buf->handle;

    if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
        return -errno;

    buf->handle = 0;
    return 0;
}
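/*
 * Editor's note: buffer objects are created with a mask of placement and
 * access flags and mapped on demand with drmBOMap() (defined below).  A
 * minimal lifetime sketch; the 64 KB size and the flag combination are
 * example values, and the exact placement flags are driver-dependent:
 *
 * \code
 * drmBO buf;
 * void *ptr;
 *
 * if (drmBOCreate(fd, 65536, 0, NULL,
 *                 DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT,
 *                 0, &buf))
 *     return -1;
 * if (drmBOMap(fd, &buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, &ptr))
 *     return -1;
 * memset(ptr, 0, buf.size);
 * drmBOUnmap(fd, &buf);
 * drmBOUnreference(fd, &buf);
 * \endcode
 */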
/*
 * Flags can be DRM_BO_FLAG_READ and/or DRM_BO_FLAG_WRITE or'ed together.
 * The hint can currently be DRM_BO_HINT_DONT_BLOCK, which makes the call
 * return -EBUSY if it can't immediately honor the mapping request.
 */
int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
             void **address)
{
    struct drm_bo_map_wait_idle_arg arg;
    struct drm_bo_info_req *req = &arg.d.req;
    struct drm_bo_info_rep *rep = &arg.d.rep;
    int ret = 0;

    /*
     * Make sure we have a virtual address of the buffer.
     */
    if (!buf->virtual) {
        drmAddress virtual;
        virtual = mmap(0, buf->size + buf->start,
                       PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                       buf->mapHandle);
        if (virtual == MAP_FAILED) {
            ret = -errno;
        }
        if (ret)
            return ret;
        buf->mapVirtual = virtual;
        buf->virtual = ((char *)virtual) + buf->start;
    }

    memset(&arg, 0, sizeof(arg));
    req->handle = buf->handle;
    req->mask = mapFlags;
    req->hint = mapHint;

    /*
     * May hang if the buffer object is busy.
     * This IOCTL synchronizes the buffer.
     */
    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_MAP, &arg);
    if (ret)
        return ret;

    drmBOCopyReply(rep, buf);
    buf->mapFlags = mapFlags;
    ++buf->mapCount;
    *address = buf->virtual;

    return 0;
}

int drmBOUnmap(int fd, drmBO *buf)
{
    struct drm_bo_handle_arg arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = buf->handle;

    if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
        return -errno;
    }
    buf->mapCount--;
    return 0;
}

int drmBOSetStatus(int fd, drmBO *buf, uint64_t flags, uint64_t mask,
                   unsigned int hint, unsigned int desired_tile_stride,
                   unsigned int tile_info)
{
    struct drm_bo_map_wait_idle_arg arg;
    struct drm_bo_info_req *req = &arg.d.req;
    struct drm_bo_info_rep *rep = &arg.d.rep;
    int ret = 0;

    memset(&arg, 0, sizeof(arg));
    req->mask = mask;
    req->flags = flags;
    req->handle = buf->handle;
    req->hint = hint;
    req->desired_tile_stride = desired_tile_stride;
    req->tile_info = tile_info;

    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_SETSTATUS, &arg);
    if (ret)
        return ret;

    drmBOCopyReply(rep, buf);
    return 0;
}

int drmBOInfo(int fd, drmBO *buf)
{
    struct drm_bo_reference_info_arg arg;
    struct drm_bo_handle_arg *req = &arg.d.req;
    struct drm_bo_info_rep *rep = &arg.d.rep;
    int ret = 0;

    memset(&arg, 0, sizeof(arg));
    req->handle = buf->handle;

    ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
    if (ret)
        return -errno;

    drmBOCopyReply(rep, buf);
    return 0;
}

int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
{
    struct drm_bo_map_wait_idle_arg arg;
    struct drm_bo_info_req *req = &arg.d.req;
    struct drm_bo_info_rep *rep = &arg.d.rep;
    int ret = 0;

    if ((buf->flags & DRM_BO_FLAG_SHAREABLE) ||
        (buf->replyFlags & DRM_BO_REP_BUSY)) {
        memset(&arg, 0, sizeof(arg));
        req->handle = buf->handle;
        req->hint = hint;

        ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);
        if (ret)
            return ret;

        drmBOCopyReply(rep, buf);
    }
    return 0;
}

int drmBOBusy(int fd, drmBO *buf, int *busy)
{
    if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
        !(buf->replyFlags & DRM_BO_REP_BUSY)) {
        *busy = 0;
        return 0;
    } else {
        int ret = drmBOInfo(fd, buf);
        if (ret)
            return ret;
        *busy = (buf->replyFlags & DRM_BO_REP_BUSY);
        return 0;
    }
}

int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
              unsigned memType)
{
    struct drm_mm_init_arg arg;

    memset(&arg, 0, sizeof(arg));

    arg.magic = DRM_BO_INIT_MAGIC;
    arg.major = DRM_BO_INIT_MAJOR;
    arg.minor = DRM_BO_INIT_MINOR;
    arg.p_offset = pOffset;
    arg.p_size = pSize;
    arg.mem_type = memType;

    if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
        return -errno;
    return 0;
}

int drmMMTakedown(int fd, unsigned memType)
{
    struct drm_mm_type_arg arg;

    memset(&arg, 0, sizeof(arg));
    arg.mem_type = memType;

    if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
        return -errno;
    return 0;
}
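/*
 * Editor's note: drmMMInit() brings up the buffer-object memory manager
 * for one memory type before any buffers are created, and drmMMTakedown()
 * tears it down again.  A sketch for a TT (AGP/system) region; the offset
 * and size are example values, and are assumed here to be given in pages
 * rather than bytes:
 *
 * \code
 * if (drmMMInit(fd, 0, 65536, DRM_BO_MEM_TT))
 *     return -1;
 * drmMMTakedown(fd, DRM_BO_MEM_TT);
 * \endcode
 */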
/*
 * If this function returns an error, and lockBM was set to 1,
 * the buffer manager is NOT locked.
 */
int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict)
{
    struct drm_mm_type_arg arg;

    memset(&arg, 0, sizeof(arg));
    arg.mem_type = memType;
    arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
    arg.lock_flags |= (ignoreNoEvict) ? DRM_BO_LOCK_IGNORE_NO_EVICT : 0;

    return drmIoctlTimeout(fd, DRM_IOCTL_MM_LOCK, &arg);
}

int drmMMUnlock(int fd, unsigned memType, int unlockBM)
{
    struct drm_mm_type_arg arg;

    memset(&arg, 0, sizeof(arg));
    arg.mem_type = memType;
    arg.lock_flags |= (unlockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;

    return drmIoctlTimeout(fd, DRM_IOCTL_MM_UNLOCK, &arg);
}

int drmBOVersion(int fd, unsigned int *major, unsigned int *minor,
                 unsigned int *patchlevel)
{
    struct drm_bo_version_arg arg;
    int ret;

    memset(&arg, 0, sizeof(arg));
    ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
    if (ret)
        return -errno;

    if (major)
        *major = arg.major;
    if (minor)
        *minor = arg.minor;
    if (patchlevel)
        *patchlevel = arg.patchlevel;

    return 0;
}

#define DRM_MAX_FDS 16
static struct {
    char *BusID;
    int fd;
    int refcount;
} connection[DRM_MAX_FDS];

static int nr_fds = 0;

int drmOpenOnce(void *unused, const char *BusID, int *newlyopened)
{
    int i;
    int fd;

    for (i = 0; i < nr_fds; i++)
        if (strcmp(BusID, connection[i].BusID) == 0) {
            connection[i].refcount++;
            *newlyopened = 0;
            return connection[i].fd;
        }

    fd = drmOpen(unused, BusID);
    if (fd <= 0 || nr_fds == DRM_MAX_FDS)
        return fd;

    connection[nr_fds].BusID = strdup(BusID);
    connection[nr_fds].fd = fd;
    connection[nr_fds].refcount = 1;
    *newlyopened = 1;

    if (0)
        fprintf(stderr, "saved connection %d for %s %d\n",
                nr_fds, connection[nr_fds].BusID,
                strcmp(BusID, connection[nr_fds].BusID));

    nr_fds++;

    return fd;
}

void drmCloseOnce(int fd)
{
    int i;

    for (i = 0; i < nr_fds; i++) {
        if (fd == connection[i].fd) {
            if (--connection[i].refcount == 0) {
                drmClose(connection[i].fd);
                free(connection[i].BusID);

                if (i < --nr_fds)
                    connection[i] = connection[nr_fds];

                return;
            }
        }
    }
}
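/*
 * Editor's note: drmOpenOnce()/drmCloseOnce() reference-count up to
 * DRM_MAX_FDS open connections by BusID, so several clients within one
 * process can share a single file descriptor.  A minimal sketch; the
 * BusID string is an example value:
 *
 * \code
 * int newly;
 * int fd = drmOpenOnce(NULL, "PCI:1:0:0", &newly);
 *
 * if (fd < 0)
 *     return fd;
 * drmCloseOnce(fd);
 * \endcode
 */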