/* * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/
#ifndef VIA_3D_REG_H
#define VIA_3D_REG_H

/* Register / command-stream constants for the VIA (S3G) 3D engine
 * (see the header guard above; all values are hardware bit layouts).
 */
#define HC_REG_BASE 0x0400
#define HC_REG_TRANS_SPACE 0x0040
#define HC_ParaN_MASK 0xffffffff
#define HC_Para_MASK 0x00ffffff
#define HC_SubA_MASK 0xff000000
#define HC_SubA_SHIFT 24
/* Transmission Setting */
#define HC_REG_TRANS_SET 0x003c
#define HC_ParaSubType_MASK 0xff000000
#define HC_ParaType_MASK 0x00ff0000
#define HC_ParaOS_MASK 0x0000ff00
#define HC_ParaAdr_MASK 0x000000ff
#define HC_ParaSubType_SHIFT 24
#define HC_ParaType_SHIFT 16
#define HC_ParaOS_SHIFT 8
#define HC_ParaAdr_SHIFT 0
#define HC_ParaType_CmdVdata 0x0000
#define HC_ParaType_NotTex 0x0001
#define HC_ParaType_Tex 0x0002
#define HC_ParaType_Palette 0x0003
#define HC_ParaType_PreCR 0x0010
#define HC_ParaType_Auto 0x00fe
/* Transmission Space */
#define HC_REG_Hpara0 0x0040
/* NOTE(review): "HpataAF" looks like a typo for "HparaAF" in the original
 * vendor header; the name is kept as-is for source compatibility. */
#define HC_REG_HpataAF 0x02fc
/* Read */
#define HC_REG_HREngSt 0x0000
#define HC_REG_HRFIFOempty 0x0004
#define HC_REG_HRFIFOfull 0x0008
#define HC_REG_HRErr 0x000c
#define HC_REG_FIFOstatus 0x0010
/* HC_REG_HREngSt 0x0000 */
#define HC_HDASZC_MASK 0x00010000
#define HC_HSGEMI_MASK 0x0000f000
#define HC_HLGEMISt_MASK 0x00000f00
#define HC_HCRSt_MASK 0x00000080
#define HC_HSE0St_MASK 0x00000040
#define HC_HSE1St_MASK 0x00000020
#define HC_HPESt_MASK 0x00000010
#define HC_HXESt_MASK 0x00000008
#define HC_HBESt_MASK 0x00000004
#define HC_HE2St_MASK 0x00000002
#define HC_HE3St_MASK 0x00000001
/* HC_REG_HRFIFOempty 0x0004 */
#define HC_HRZDempty_MASK 0x00000010
#define HC_HRTXAempty_MASK 0x00000008
#define HC_HRTXDempty_MASK 0x00000004
#define HC_HWZDempty_MASK 0x00000002
#define HC_HWCDempty_MASK 0x00000001
/* HC_REG_HRFIFOfull 0x0008 */
#define HC_HRZDfull_MASK 0x00000010
#define HC_HRTXAfull_MASK 0x00000008
#define HC_HRTXDfull_MASK 0x00000004
#define HC_HWZDfull_MASK 0x00000002
#define HC_HWCDfull_MASK 0x00000001
/* HC_REG_HRErr 0x000c */
#define HC_HAGPCMErr_MASK 0x80000000
#define HC_HAGPCMErrC_MASK 0x70000000
/* HC_REG_FIFOstatus 0x0010 */
#define HC_HRFIFOATall_MASK 0x80000000
/* HC_REG_FIFOstatus 0x0010 (continued) */
#define HC_HRFIFOATbusy_MASK 0x40000000
#define HC_HRATFGMDo_MASK 0x00000100
#define HC_HRATFGMDi_MASK 0x00000080
#define HC_HRATFRZD_MASK 0x00000040
#define HC_HRATFRTXA_MASK 0x00000020
#define HC_HRATFRTXD_MASK 0x00000010
#define HC_HRATFWZD_MASK 0x00000008
#define HC_HRATFWCD_MASK 0x00000004
#define HC_HRATTXTAG_MASK 0x00000002
#define HC_HRATTXCH_MASK 0x00000001
/* AGP Command Setting */
#define HC_SubA_HAGPBstL 0x0060
#define HC_SubA_HAGPBendL 0x0061
#define HC_SubA_HAGPCMNT 0x0062
#define HC_SubA_HAGPBpL 0x0063
#define HC_SubA_HAGPBpH 0x0064
/* HC_SubA_HAGPCMNT 0x0062 */
#define HC_HAGPCMNT_MASK 0x00800000
#define HC_HCmdErrClr_MASK 0x00400000
#define HC_HAGPBendH_MASK 0x0000ff00
#define HC_HAGPBstH_MASK 0x000000ff
#define HC_HAGPBendH_SHIFT 8
#define HC_HAGPBstH_SHIFT 0
/* HC_SubA_HAGPBpL 0x0063 */
#define HC_HAGPBpL_MASK 0x00fffffc
#define HC_HAGPBpID_MASK 0x00000003
#define HC_HAGPBpID_PAUSE 0x00000000
#define HC_HAGPBpID_JUMP 0x00000001
#define HC_HAGPBpID_STOP 0x00000002
/* HC_SubA_HAGPBpH 0x0064 */
#define HC_HAGPBpH_MASK 0x00ffffff
/* Miscellaneous Settings */
#define HC_SubA_HClipTB 0x0070
#define HC_SubA_HClipLR 0x0071
#define HC_SubA_HFPClipTL 0x0072
#define HC_SubA_HFPClipBL 0x0073
#define HC_SubA_HFPClipLL 0x0074
#define HC_SubA_HFPClipRL 0x0075
#define HC_SubA_HFPClipTBH 0x0076
#define HC_SubA_HFPClipLRH 0x0077
#define HC_SubA_HLP 0x0078
#define HC_SubA_HLPRF 0x0079
#define HC_SubA_HSolidCL 0x007a
#define HC_SubA_HPixGC 0x007b
#define HC_SubA_HSPXYOS 0x007c
#define HC_SubA_HVertexCNT 0x007d
#define HC_HClipT_MASK 0x00fff000
#define HC_HClipT_SHIFT 12
#define HC_HClipB_MASK 0x00000fff
#define HC_HClipB_SHIFT 0
#define HC_HClipL_MASK 0x00fff000
#define HC_HClipL_SHIFT 12
#define HC_HClipR_MASK 0x00000fff
#define HC_HClipR_SHIFT 0
#define HC_HFPClipBH_MASK 0x0000ff00
#define HC_HFPClipBH_SHIFT 8
#define HC_HFPClipTH_MASK 0x000000ff
#define HC_HFPClipTH_SHIFT 0
#define HC_HFPClipRH_MASK 0x0000ff00
#define HC_HFPClipRH_SHIFT 8
#define HC_HFPClipLH_MASK 0x000000ff
#define HC_HFPClipLH_SHIFT 0
#define HC_HSolidCH_MASK 0x000000ff
#define HC_HPixGC_MASK 0x00800000
#define HC_HSPXOS_MASK 0x00fff000
#define HC_HSPXOS_SHIFT 12
#define HC_HSPYOS_MASK 0x00000fff
/* Command
 * Command A
 */
#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
#define HC_HE3Fire_MASK 0x00100000
#define HC_HPMType_MASK 0x000f0000
#define HC_HEFlag_MASK 0x0000e000
#define HC_HShading_MASK 0x00001c00
#define HC_HPMValidN_MASK 0x00000200
#define HC_HPLEND_MASK 0x00000100
#define HC_HVCycle_MASK 0x000000ff
#define HC_HVCycle_Style_MASK 0x000000c0
#define HC_HVCycle_ChgA_MASK 0x00000030
#define HC_HVCycle_ChgB_MASK 0x0000000c
#define HC_HVCycle_ChgC_MASK 0x00000003
#define HC_HPMType_Point 0x00000000
#define HC_HPMType_Line 0x00010000
#define HC_HPMType_Tri 0x00020000
#define HC_HPMType_TriWF 0x00040000
#define HC_HEFlag_NoAA 0x00000000
#define HC_HEFlag_ab 0x00008000
#define HC_HEFlag_bc 0x00004000
#define HC_HEFlag_ca 0x00002000
#define HC_HShading_Solid 0x00000000
#define HC_HShading_FlatA 0x00000400
#define HC_HShading_FlatB 0x00000800
#define HC_HShading_FlatC 0x00000c00
#define HC_HShading_Gouraud 0x00001000
#define HC_HVCycle_Full 0x00000000
#define HC_HVCycle_AFP 0x00000040
#define HC_HVCycle_One 0x000000c0
#define HC_HVCycle_NewA 0x00000000
#define HC_HVCycle_AA 0x00000010
#define HC_HVCycle_AB 0x00000020
#define HC_HVCycle_AC 0x00000030
#define HC_HVCycle_NewB 0x00000000
#define HC_HVCycle_BA 0x00000004
#define HC_HVCycle_BB 0x00000008
#define HC_HVCycle_BC 0x0000000c
#define HC_HVCycle_NewC 0x00000000
#define HC_HVCycle_CA 0x00000001
#define HC_HVCycle_CB 0x00000002
#define HC_HVCycle_CC 0x00000003
/* Command B */
#define HC_HLPrst_MASK 0x00010000
#define HC_HLLastP_MASK 0x00008000
#define HC_HVPMSK_MASK 0x00007f80
#define HC_HBFace_MASK 0x00000040
#define HC_H2nd1VT_MASK 0x0000003f
#define HC_HVPMSK_X 0x00004000
#define HC_HVPMSK_Y 0x00002000
#define HC_HVPMSK_Z 0x00001000
#define HC_HVPMSK_W 0x00000800
#define HC_HVPMSK_Cd 0x00000400
#define HC_HVPMSK_Cs 0x00000200
#define HC_HVPMSK_S 0x00000100
#define HC_HVPMSK_T 0x00000080
/* Enable Setting */
#define HC_SubA_HEnable 0x0000
#define HC_HenTXEnvMap_MASK 0x00200000
#define HC_HenVertexCNT_MASK 0x00100000
#define HC_HenCPUDAZ_MASK 0x00080000
#define HC_HenDASZWC_MASK 0x00040000
#define HC_HenFBCull_MASK 0x00020000
#define HC_HenCW_MASK 0x00010000
#define HC_HenAA_MASK 0x00008000
#define HC_HenST_MASK 0x00004000
#define HC_HenZT_MASK 0x00002000
#define HC_HenZW_MASK 0x00001000
#define HC_HenAT_MASK 0x00000800
#define HC_HenAW_MASK 0x00000400
#define HC_HenSP_MASK 0x00000200
#define HC_HenLP_MASK 0x00000100
#define HC_HenTXCH_MASK 0x00000080
#define HC_HenTXMP_MASK 0x00000040
#define HC_HenTXPP_MASK 0x00000020
#define HC_HenTXTR_MASK 0x00000010
#define HC_HenCS_MASK 0x00000008
#define HC_HenFOG_MASK 0x00000004
#define HC_HenABL_MASK 0x00000002
#define HC_HenDT_MASK 0x00000001
/* Z Setting */
#define HC_SubA_HZWBBasL 0x0010
#define HC_SubA_HZWBBasH 0x0011
#define HC_SubA_HZWBType 0x0012
#define HC_SubA_HZBiasL 0x0013
#define HC_SubA_HZWBend 0x0014
#define HC_SubA_HZWTMD 0x0015
#define HC_SubA_HZWCDL 0x0016
#define HC_SubA_HZWCTAGnum 0x0017
#define HC_SubA_HZCYNum 0x0018
#define HC_SubA_HZWCFire 0x0019
/* HC_SubA_HZWBType */
#define HC_HZWBType_MASK 0x00800000
#define HC_HZBiasedWB_MASK 0x00400000
#define HC_HZONEasFF_MASK 0x00200000
#define HC_HZOONEasFF_MASK 0x00100000
#define HC_HZWBFM_MASK 0x00030000
#define HC_HZWBLoc_MASK 0x0000c000
#define HC_HZWBPit_MASK 0x00003fff
#define HC_HZWBFM_16 0x00000000
#define HC_HZWBFM_32 0x00020000
#define HC_HZWBFM_24 0x00030000
#define HC_HZWBLoc_Local 0x00000000
#define HC_HZWBLoc_SyS 0x00004000
/* HC_SubA_HZWBend */
#define HC_HZWBend_MASK 0x00ffe000
#define HC_HZBiasH_MASK 0x000000ff
#define HC_HZWBend_SHIFT 10
/* HC_SubA_HZWTMD */
#define HC_HZWTMD_MASK 0x00070000
#define HC_HEBEBias_MASK 0x00007f00
#define HC_HZNF_MASK 0x000000ff
#define HC_HZWTMD_NeverPass 0x00000000
#define HC_HZWTMD_LT 0x00010000
#define HC_HZWTMD_EQ 0x00020000
#define HC_HZWTMD_LE 0x00030000
#define HC_HZWTMD_GT 0x00040000
#define HC_HZWTMD_NE 0x00050000
#define HC_HZWTMD_GE 0x00060000
#define HC_HZWTMD_AllPass 0x00070000
#define HC_HEBEBias_SHIFT 8
/* HC_SubA_HZWCDL 0x0016 */
#define HC_HZWCDL_MASK 0x00ffffff
/* HC_SubA_HZWCTAGnum 0x0017 */
#define HC_HZWCTAGnum_MASK 0x00ff0000
#define HC_HZWCTAGnum_SHIFT 16
#define HC_HZWCDH_MASK 0x000000ff
#define HC_HZWCDH_SHIFT 0
/* HC_SubA_HZCYNum 0x0018 */
#define HC_HZCYNum_MASK 0x00030000
#define HC_HZCYNum_SHIFT 16
#define HC_HZWCQWnum_MASK 0x00003fff
#define HC_HZWCQWnum_SHIFT 0
/* HC_SubA_HZWCFire 0x0019 */
#define HC_ZWCFire_MASK 0x00010000
#define HC_HZWCQWnumLast_MASK 0x00003fff
#define HC_HZWCQWnumLast_SHIFT 0
/* Stencil Setting */
#define HC_SubA_HSTREF 0x0023
#define HC_SubA_HSTMD 0x0024
/* HC_SubA_HSBFM */
#define HC_HSBFM_MASK 0x00030000
#define HC_HSBLoc_MASK 0x0000c000
#define HC_HSBPit_MASK 0x00003fff
/* HC_SubA_HSTREF */
#define HC_HSTREF_MASK 0x00ff0000
#define HC_HSTOPMSK_MASK 0x0000ff00
#define HC_HSTBMSK_MASK 0x000000ff
#define HC_HSTREF_SHIFT 16
#define HC_HSTOPMSK_SHIFT 8
/* HC_SubA_HSTMD */
#define HC_HSTMD_MASK 0x00070000
#define HC_HSTOPSF_MASK 0x000001c0
#define HC_HSTOPSPZF_MASK 0x00000038
#define HC_HSTOPSPZP_MASK 0x00000007
#define HC_HSTMD_NeverPass 0x00000000
#define HC_HSTMD_LT 0x00010000
#define HC_HSTMD_EQ 0x00020000
#define HC_HSTMD_LE 0x00030000
#define HC_HSTMD_GT 0x00040000
#define HC_HSTMD_NE 0x00050000
#define HC_HSTMD_GE 0x00060000
#define HC_HSTMD_AllPass 0x00070000
#define HC_HSTOPSF_KEEP 0x00000000
#define HC_HSTOPSF_ZERO 0x00000040
#define HC_HSTOPSF_REPLACE 0x00000080
#define HC_HSTOPSF_INCRSAT 0x000000c0
#define HC_HSTOPSF_DECRSAT 0x00000100
#define HC_HSTOPSF_INVERT 0x00000140
#define HC_HSTOPSF_INCR 0x00000180
#define HC_HSTOPSF_DECR 0x000001c0
#define HC_HSTOPSPZF_KEEP 0x00000000
#define HC_HSTOPSPZF_ZERO 0x00000008
#define HC_HSTOPSPZF_REPLACE 0x00000010
#define HC_HSTOPSPZF_INCRSAT 0x00000018
#define HC_HSTOPSPZF_DECRSAT 0x00000020
#define HC_HSTOPSPZF_INVERT 0x00000028
#define HC_HSTOPSPZF_INCR 0x00000030
#define HC_HSTOPSPZF_DECR 0x00000038
#define HC_HSTOPSPZP_KEEP 0x00000000
#define HC_HSTOPSPZP_ZERO 0x00000001
#define HC_HSTOPSPZP_REPLACE 0x00000002
#define HC_HSTOPSPZP_INCRSAT 0x00000003
#define HC_HSTOPSPZP_DECRSAT 0x00000004
#define HC_HSTOPSPZP_INVERT 0x00000005
#define HC_HSTOPSPZP_INCR 0x00000006
#define HC_HSTOPSPZP_DECR 0x00000007
/* Alpha Setting */
#define HC_SubA_HABBasL 0x0030
#define HC_SubA_HABBasH 0x0031
#define HC_SubA_HABFM 0x0032
#define HC_SubA_HATMD 0x0033
#define HC_SubA_HABLCsat 0x0034
#define HC_SubA_HABLCop 0x0035
#define HC_SubA_HABLAsat 0x0036
#define HC_SubA_HABLAop 0x0037
#define HC_SubA_HABLRCa 0x0038
#define HC_SubA_HABLRFCa 0x0039
#define HC_SubA_HABLRCbias 0x003a
#define HC_SubA_HABLRCb 0x003b
#define HC_SubA_HABLRFCb 0x003c
#define HC_SubA_HABLRAa 0x003d
#define HC_SubA_HABLRAb 0x003e
/* HC_SubA_HABFM */
#define HC_HABFM_MASK 0x00030000
#define HC_HABLoc_MASK 0x0000c000
#define HC_HABPit_MASK 0x000007ff
/* HC_SubA_HATMD */
#define HC_HATMD_MASK 0x00000700
#define HC_HATREF_MASK 0x000000ff
#define HC_HATMD_NeverPass 0x00000000
#define HC_HATMD_LT 0x00000100
#define HC_HATMD_EQ 0x00000200
#define HC_HATMD_LE 0x00000300
#define HC_HATMD_GT 0x00000400
#define HC_HATMD_NE 0x00000500
#define HC_HATMD_GE 0x00000600
#define HC_HATMD_AllPass 0x00000700
/* HC_SubA_HABLCsat */
#define HC_HABLCsat_MASK 0x00010000
#define HC_HABLCa_MASK 0x0000fc00
#define HC_HABLCa_C_MASK 0x0000c000
#define HC_HABLCa_OPC_MASK 0x00003c00
#define HC_HABLFCa_MASK 0x000003f0
#define HC_HABLFCa_C_MASK 0x00000300
#define HC_HABLFCa_OPC_MASK 0x000000f0
#define HC_HABLCbias_MASK 0x0000000f
#define HC_HABLCbias_C_MASK 0x00000008
#define HC_HABLCbias_OPC_MASK 0x00000007
/*-- Define the input color.
 */
#define HC_XC_Csrc 0x00000000
#define HC_XC_Cdst 0x00000001
#define HC_XC_Asrc 0x00000002
#define HC_XC_Adst 0x00000003
#define HC_XC_Fog 0x00000004
#define HC_XC_HABLRC 0x00000005
#define HC_XC_minSrcDst 0x00000006
#define HC_XC_maxSrcDst 0x00000007
#define HC_XC_mimAsrcInvAdst 0x00000008
#define HC_XC_OPC 0x00000000
#define HC_XC_InvOPC 0x00000010
#define HC_XC_OPCp5 0x00000020
/*-- Define the input Alpha */
#define HC_XA_OPA 0x00000000
#define HC_XA_InvOPA 0x00000010
#define HC_XA_OPAp5 0x00000020
#define HC_XA_0 0x00000000
#define HC_XA_Asrc 0x00000001
#define HC_XA_Adst 0x00000002
#define HC_XA_Fog 0x00000003
#define HC_XA_minAsrcFog 0x00000004
#define HC_XA_minAsrcAdst 0x00000005
#define HC_XA_maxAsrcFog 0x00000006
#define HC_XA_maxAsrcAdst 0x00000007
#define HC_XA_HABLRA 0x00000008
#define HC_XA_minAsrcInvAdst 0x00000008
#define HC_XA_HABLFRA 0x00000009
/*-- */
#define HC_HABLCa_OPC (HC_XC_OPC << 10)
#define HC_HABLCa_InvOPC (HC_XC_InvOPC << 10)
#define HC_HABLCa_OPCp5 (HC_XC_OPCp5 << 10)
#define HC_HABLCa_Csrc (HC_XC_Csrc << 10)
#define HC_HABLCa_Cdst (HC_XC_Cdst << 10)
#define HC_HABLCa_Asrc (HC_XC_Asrc << 10)
#define HC_HABLCa_Adst (HC_XC_Adst << 10)
#define HC_HABLCa_Fog (HC_XC_Fog << 10)
#define HC_HABLCa_HABLRCa (HC_XC_HABLRC << 10)
#define HC_HABLCa_minSrcDst (HC_XC_minSrcDst << 10)
#define HC_HABLCa_maxSrcDst (HC_XC_maxSrcDst << 10)
#define HC_HABLFCa_OPC (HC_XC_OPC << 4)
#define HC_HABLFCa_InvOPC (HC_XC_InvOPC << 4)
#define HC_HABLFCa_OPCp5 (HC_XC_OPCp5 << 4)
#define HC_HABLFCa_Csrc (HC_XC_Csrc << 4)
#define HC_HABLFCa_Cdst (HC_XC_Cdst << 4)
#define HC_HABLFCa_Asrc (HC_XC_Asrc << 4)
#define HC_HABLFCa_Adst (HC_XC_Adst << 4)
#define HC_HABLFCa_Fog (HC_XC_Fog << 4)
#define HC_HABLFCa_HABLRCa (HC_XC_HABLRC << 4)
#define HC_HABLFCa_minSrcDst (HC_XC_minSrcDst << 4)
#define HC_HABLFCa_maxSrcDst (HC_XC_maxSrcDst << 4)
#define HC_HABLFCa_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 4)
#define HC_HABLCbias_HABLRCbias 0x00000000
#define HC_HABLCbias_Asrc 0x00000001
#define HC_HABLCbias_Adst 0x00000002
#define HC_HABLCbias_Fog 0x00000003
#define HC_HABLCbias_Cin 0x00000004
/* HC_SubA_HABLCop 0x0035 */
#define HC_HABLdot_MASK 0x00010000
#define HC_HABLCop_MASK 0x00004000
#define HC_HABLCb_MASK 0x00003f00
#define HC_HABLCb_C_MASK 0x00003000
#define HC_HABLCb_OPC_MASK 0x00000f00
#define HC_HABLFCb_MASK 0x000000fc
#define HC_HABLFCb_C_MASK 0x000000c0
#define HC_HABLFCb_OPC_MASK 0x0000003c
#define HC_HABLCshift_MASK 0x00000003
#define HC_HABLCb_OPC (HC_XC_OPC << 8)
#define HC_HABLCb_InvOPC (HC_XC_InvOPC << 8)
#define HC_HABLCb_OPCp5 (HC_XC_OPCp5 << 8)
#define HC_HABLCb_Csrc (HC_XC_Csrc << 8)
#define HC_HABLCb_Cdst (HC_XC_Cdst << 8)
#define HC_HABLCb_Asrc (HC_XC_Asrc << 8)
#define HC_HABLCb_Adst (HC_XC_Adst << 8)
#define HC_HABLCb_Fog (HC_XC_Fog << 8)
#define HC_HABLCb_HABLRCa (HC_XC_HABLRC << 8)
#define HC_HABLCb_minSrcDst (HC_XC_minSrcDst << 8)
#define HC_HABLCb_maxSrcDst (HC_XC_maxSrcDst << 8)
#define HC_HABLFCb_OPC (HC_XC_OPC << 2)
#define HC_HABLFCb_InvOPC (HC_XC_InvOPC << 2)
#define HC_HABLFCb_OPCp5 (HC_XC_OPCp5 << 2)
#define HC_HABLFCb_Csrc (HC_XC_Csrc << 2)
#define HC_HABLFCb_Cdst (HC_XC_Cdst << 2)
#define HC_HABLFCb_Asrc (HC_XC_Asrc << 2)
#define HC_HABLFCb_Adst (HC_XC_Adst << 2)
#define HC_HABLFCb_Fog (HC_XC_Fog << 2)
#define HC_HABLFCb_HABLRCb (HC_XC_HABLRC << 2)
#define HC_HABLFCb_minSrcDst (HC_XC_minSrcDst << 2)
#define HC_HABLFCb_maxSrcDst (HC_XC_maxSrcDst << 2)
#define HC_HABLFCb_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 2)
/* HC_SubA_HABLAsat 0x0036 */
#define HC_HABLAsat_MASK 0x00010000
#define HC_HABLAa_MASK 0x0000fc00
#define HC_HABLAa_A_MASK 0x0000c000
#define HC_HABLAa_OPA_MASK 0x00003c00
#define HC_HABLFAa_MASK 0x000003f0
#define HC_HABLFAa_A_MASK 0x00000300
#define HC_HABLFAa_OPA_MASK 0x000000f0
#define HC_HABLAbias_MASK 0x0000000f
#define HC_HABLAbias_A_MASK 0x00000008
#define HC_HABLAbias_OPA_MASK 0x00000007
#define HC_HABLAa_OPA (HC_XA_OPA << 10)
#define HC_HABLAa_InvOPA (HC_XA_InvOPA << 10)
/* HC_SubA_HABLAsat 0x0036 (continued) */
#define HC_HABLAa_OPAp5 (HC_XA_OPAp5 << 10)
#define HC_HABLAa_0 (HC_XA_0 << 10)
#define HC_HABLAa_Asrc (HC_XA_Asrc << 10)
#define HC_HABLAa_Adst (HC_XA_Adst << 10)
#define HC_HABLAa_Fog (HC_XA_Fog << 10)
#define HC_HABLAa_minAsrcFog (HC_XA_minAsrcFog << 10)
#define HC_HABLAa_minAsrcAdst (HC_XA_minAsrcAdst << 10)
#define HC_HABLAa_maxAsrcFog (HC_XA_maxAsrcFog << 10)
#define HC_HABLAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 10)
#define HC_HABLAa_HABLRA (HC_XA_HABLRA << 10)
#define HC_HABLFAa_OPA (HC_XA_OPA << 4)
#define HC_HABLFAa_InvOPA (HC_XA_InvOPA << 4)
#define HC_HABLFAa_OPAp5 (HC_XA_OPAp5 << 4)
#define HC_HABLFAa_0 (HC_XA_0 << 4)
#define HC_HABLFAa_Asrc (HC_XA_Asrc << 4)
#define HC_HABLFAa_Adst (HC_XA_Adst << 4)
#define HC_HABLFAa_Fog (HC_XA_Fog << 4)
#define HC_HABLFAa_minAsrcFog (HC_XA_minAsrcFog << 4)
#define HC_HABLFAa_minAsrcAdst (HC_XA_minAsrcAdst << 4)
#define HC_HABLFAa_maxAsrcFog (HC_XA_maxAsrcFog << 4)
#define HC_HABLFAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 4)
#define HC_HABLFAa_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 4)
#define HC_HABLFAa_HABLFRA (HC_XA_HABLFRA << 4)
#define HC_HABLAbias_HABLRAbias 0x00000000
#define HC_HABLAbias_Asrc 0x00000001
#define HC_HABLAbias_Adst 0x00000002
#define HC_HABLAbias_Fog 0x00000003
#define HC_HABLAbias_Aaa 0x00000004
/* HC_SubA_HABLAop 0x0037 */
#define HC_HABLAop_MASK 0x00004000
#define HC_HABLAb_MASK 0x00003f00
#define HC_HABLAb_OPA_MASK 0x00000f00
#define HC_HABLFAb_MASK 0x000000fc
#define HC_HABLFAb_OPA_MASK 0x0000003c
#define HC_HABLAshift_MASK 0x00000003
#define HC_HABLAb_OPA (HC_XA_OPA << 8)
#define HC_HABLAb_InvOPA (HC_XA_InvOPA << 8)
#define HC_HABLAb_OPAp5 (HC_XA_OPAp5 << 8)
#define HC_HABLAb_0 (HC_XA_0 << 8)
#define HC_HABLAb_Asrc (HC_XA_Asrc << 8)
#define HC_HABLAb_Adst (HC_XA_Adst << 8)
#define HC_HABLAb_Fog (HC_XA_Fog << 8)
#define HC_HABLAb_minAsrcFog (HC_XA_minAsrcFog << 8)
#define HC_HABLAb_minAsrcAdst (HC_XA_minAsrcAdst << 8)
#define HC_HABLAb_maxAsrcFog (HC_XA_maxAsrcFog << 8)
#define HC_HABLAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 8)
#define HC_HABLAb_HABLRA (HC_XA_HABLRA << 8)
#define HC_HABLFAb_OPA (HC_XA_OPA << 2)
#define HC_HABLFAb_InvOPA (HC_XA_InvOPA << 2)
#define HC_HABLFAb_OPAp5 (HC_XA_OPAp5 << 2)
#define HC_HABLFAb_0 (HC_XA_0 << 2)
#define HC_HABLFAb_Asrc (HC_XA_Asrc << 2)
#define HC_HABLFAb_Adst (HC_XA_Adst << 2)
#define HC_HABLFAb_Fog (HC_XA_Fog << 2)
#define HC_HABLFAb_minAsrcFog (HC_XA_minAsrcFog << 2)
#define HC_HABLFAb_minAsrcAdst (HC_XA_minAsrcAdst << 2)
#define HC_HABLFAb_maxAsrcFog (HC_XA_maxAsrcFog << 2)
#define HC_HABLFAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 2)
#define HC_HABLFAb_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 2)
#define HC_HABLFAb_HABLFRA (HC_XA_HABLFRA << 2)
/* HC_SubA_HABLRAa 0x003d */
#define HC_HABLRAa_MASK 0x00ff0000
#define HC_HABLRFAa_MASK 0x0000ff00
#define HC_HABLRAbias_MASK 0x000000ff
#define HC_HABLRAa_SHIFT 16
#define HC_HABLRFAa_SHIFT 8
/* HC_SubA_HABLRAb 0x003e */
#define HC_HABLRAb_MASK 0x0000ff00
#define HC_HABLRFAb_MASK 0x000000ff
#define HC_HABLRAb_SHIFT 8
/* Destination Setting */
#define HC_SubA_HDBBasL 0x0040
#define HC_SubA_HDBBasH 0x0041
#define HC_SubA_HDBFM 0x0042
#define HC_SubA_HFBBMSKL 0x0043
#define HC_SubA_HROP 0x0044
/* HC_SubA_HDBFM 0x0042 */
#define HC_HDBFM_MASK 0x001f0000
#define HC_HDBLoc_MASK 0x0000c000
#define HC_HDBPit_MASK 0x00003fff
#define HC_HDBFM_RGB555 0x00000000
#define HC_HDBFM_RGB565 0x00010000
#define HC_HDBFM_ARGB4444 0x00020000
#define HC_HDBFM_ARGB1555 0x00030000
#define HC_HDBFM_BGR555 0x00040000
#define HC_HDBFM_BGR565 0x00050000
#define HC_HDBFM_ABGR4444 0x00060000
#define HC_HDBFM_ABGR1555 0x00070000
#define HC_HDBFM_ARGB0888 0x00080000
#define HC_HDBFM_ARGB8888 0x00090000
#define HC_HDBFM_ABGR0888 0x000a0000
#define HC_HDBFM_ABGR8888 0x000b0000
#define HC_HDBLoc_Local 0x00000000
#define HC_HDBLoc_Sys 0x00004000
/* HC_SubA_HROP 0x0044 */
#define HC_HROP_MASK 0x00000f00
#define HC_HFBBMSKH_MASK 0x000000ff
#define HC_HROP_BLACK 0x00000000
#define HC_HROP_DPon 0x00000100
#define HC_HROP_DPna 0x00000200
#define HC_HROP_Pn 0x00000300
#define HC_HROP_PDna 0x00000400
#define HC_HROP_Dn 0x00000500
#define HC_HROP_DPx 0x00000600
#define HC_HROP_DPan 0x00000700
#define HC_HROP_DPa 0x00000800
#define HC_HROP_DPxn 0x00000900
#define HC_HROP_D 0x00000a00
#define HC_HROP_DPno 0x00000b00
#define HC_HROP_P 0x00000c00
#define HC_HROP_PDno 0x00000d00
#define HC_HROP_DPo 0x00000e00
#define HC_HROP_WHITE 0x00000f00
/* Fog Setting */
#define HC_SubA_HFogLF 0x0050
#define HC_SubA_HFogCL 0x0051
#define HC_SubA_HFogCH 0x0052
#define HC_SubA_HFogStL 0x0053
#define HC_SubA_HFogStH 0x0054
#define HC_SubA_HFogOOdMF 0x0055
#define HC_SubA_HFogOOdEF 0x0056
#define HC_SubA_HFogEndL 0x0057
#define HC_SubA_HFogDenst 0x0058
/* HC_SubA_HFogLF 0x0050 */
#define HC_FogLF_MASK 0x00000010
#define HC_FogEq_MASK 0x00000008
#define HC_FogMD_MASK 0x00000007
#define HC_FogMD_LocalFog 0x00000000
#define HC_FogMD_LinearFog 0x00000002
#define HC_FogMD_ExponentialFog 0x00000004
#define HC_FogMD_Exponential2Fog 0x00000005
/* #define HC_FogMD_FogTable 0x00000003 */
/* HC_SubA_HFogDenst 0x0058 */
#define HC_FogDenst_MASK 0x001fff00
#define HC_FogEndL_MASK 0x000000ff
/* Texture subtype definitions */
#define HC_SubType_Tex0 0x00000000
#define HC_SubType_Tex1 0x00000001
#define HC_SubType_TexGeneral 0x000000fe
/* Attribute of texture n */
#define HC_SubA_HTXnL0BasL 0x0000
#define HC_SubA_HTXnL1BasL 0x0001
#define HC_SubA_HTXnL2BasL 0x0002
#define HC_SubA_HTXnL3BasL 0x0003
#define HC_SubA_HTXnL4BasL 0x0004
#define HC_SubA_HTXnL5BasL 0x0005
#define HC_SubA_HTXnL6BasL 0x0006
#define HC_SubA_HTXnL7BasL 0x0007
#define HC_SubA_HTXnL8BasL 0x0008
#define HC_SubA_HTXnL9BasL 0x0009
#define HC_SubA_HTXnLaBasL 0x000a
#define HC_SubA_HTXnLbBasL 0x000b
#define HC_SubA_HTXnLcBasL 0x000c
#define HC_SubA_HTXnLdBasL 0x000d
#define HC_SubA_HTXnLeBasL 0x000e
#define HC_SubA_HTXnLfBasL 0x000f
#define HC_SubA_HTXnL10BasL 0x0010
#define HC_SubA_HTXnL11BasL 0x0011
#define HC_SubA_HTXnL012BasH 0x0020
#define HC_SubA_HTXnL345BasH 0x0021
#define HC_SubA_HTXnL678BasH 0x0022
#define HC_SubA_HTXnL9abBasH 0x0023
#define HC_SubA_HTXnLcdeBasH 0x0024
#define HC_SubA_HTXnLf1011BasH 0x0025
#define HC_SubA_HTXnL0Pit 0x002b
#define HC_SubA_HTXnL1Pit 0x002c
#define HC_SubA_HTXnL2Pit 0x002d
#define HC_SubA_HTXnL3Pit 0x002e
#define HC_SubA_HTXnL4Pit 0x002f
#define HC_SubA_HTXnL5Pit 0x0030
#define HC_SubA_HTXnL6Pit 0x0031
#define HC_SubA_HTXnL7Pit 0x0032
#define HC_SubA_HTXnL8Pit 0x0033
#define HC_SubA_HTXnL9Pit 0x0034
#define HC_SubA_HTXnLaPit 0x0035
#define HC_SubA_HTXnLbPit 0x0036
#define HC_SubA_HTXnLcPit 0x0037
#define HC_SubA_HTXnLdPit 0x0038
#define HC_SubA_HTXnLePit 0x0039
#define HC_SubA_HTXnLfPit 0x003a
#define HC_SubA_HTXnL10Pit 0x003b
#define HC_SubA_HTXnL11Pit 0x003c
#define HC_SubA_HTXnL0_5WE 0x004b
#define HC_SubA_HTXnL6_bWE 0x004c
#define HC_SubA_HTXnLc_11WE 0x004d
#define HC_SubA_HTXnL0_5HE 0x0051
#define HC_SubA_HTXnL6_bHE 0x0052
#define HC_SubA_HTXnLc_11HE 0x0053
#define HC_SubA_HTXnL0OS 0x0077
#define HC_SubA_HTXnTB 0x0078
#define HC_SubA_HTXnMPMD 0x0079
#define HC_SubA_HTXnCLODu 0x007a
#define HC_SubA_HTXnFM 0x007b
#define HC_SubA_HTXnTRCH 0x007c
#define HC_SubA_HTXnTRCL 0x007d
#define HC_SubA_HTXnTBC 0x007e
#define HC_SubA_HTXnTRAH 0x007f
#define HC_SubA_HTXnTBLCsat 0x0080
#define HC_SubA_HTXnTBLCop 0x0081
#define HC_SubA_HTXnTBLMPfog 0x0082
#define HC_SubA_HTXnTBLAsat 0x0083
#define HC_SubA_HTXnTBLRCa 0x0085
#define HC_SubA_HTXnTBLRCb 0x0086
#define HC_SubA_HTXnTBLRCc 0x0087
#define HC_SubA_HTXnTBLRCbias 0x0088
#define HC_SubA_HTXnTBLRAa 0x0089
#define HC_SubA_HTXnTBLRFog 0x008a
#define HC_SubA_HTXnBumpM00 0x0090
#define HC_SubA_HTXnBumpM01 0x0091
#define HC_SubA_HTXnBumpM10 0x0092
#define HC_SubA_HTXnBumpM11 0x0093
#define HC_SubA_HTXnLScale 0x0094
#define HC_SubA_HTXSMD 0x0000
/* HC_SubA_HTXnL012BasH 0x0020 */
#define HC_HTXnL0BasH_MASK 0x000000ff
#define HC_HTXnL1BasH_MASK 0x0000ff00
#define HC_HTXnL2BasH_MASK 0x00ff0000
#define HC_HTXnL1BasH_SHIFT 8
#define HC_HTXnL2BasH_SHIFT 16
/* HC_SubA_HTXnL345BasH 0x0021 */
#define HC_HTXnL3BasH_MASK 0x000000ff
#define HC_HTXnL4BasH_MASK 0x0000ff00
#define HC_HTXnL5BasH_MASK 0x00ff0000
#define HC_HTXnL4BasH_SHIFT 8
#define HC_HTXnL5BasH_SHIFT 16
/* HC_SubA_HTXnL678BasH 0x0022 */
#define HC_HTXnL6BasH_MASK 0x000000ff
#define HC_HTXnL7BasH_MASK 0x0000ff00
#define HC_HTXnL8BasH_MASK 0x00ff0000
#define HC_HTXnL7BasH_SHIFT 8
#define HC_HTXnL8BasH_SHIFT 16
/* HC_SubA_HTXnL9abBasH 0x0023 */
#define HC_HTXnL9BasH_MASK 0x000000ff
#define HC_HTXnLaBasH_MASK 0x0000ff00
#define HC_HTXnLbBasH_MASK 0x00ff0000
#define HC_HTXnLaBasH_SHIFT 8
#define HC_HTXnLbBasH_SHIFT 16
/* HC_SubA_HTXnLcdeBasH 0x0024 */
#define HC_HTXnLcBasH_MASK 0x000000ff
#define HC_HTXnLdBasH_MASK 0x0000ff00
#define HC_HTXnLeBasH_MASK 0x00ff0000
#define HC_HTXnLdBasH_SHIFT 8
#define HC_HTXnLeBasH_SHIFT 16
/* HC_SubA_HTXnLf1011BasH 0x0025 */
#define HC_HTXnLfBasH_MASK 0x000000ff
#define HC_HTXnL10BasH_MASK 0x0000ff00
#define HC_HTXnL11BasH_MASK 0x00ff0000
#define HC_HTXnL10BasH_SHIFT 8
#define HC_HTXnL11BasH_SHIFT 16
/* HC_SubA_HTXnL0Pit 0x002b */
#define HC_HTXnLnPit_MASK 0x00003fff
#define HC_HTXnEnPit_MASK 0x00080000
#define HC_HTXnLnPitE_MASK 0x00f00000
#define HC_HTXnLnPitE_SHIFT 20
/* HC_SubA_HTXnL0_5WE 0x004b */
#define HC_HTXnL0WE_MASK 0x0000000f
#define HC_HTXnL1WE_MASK 0x000000f0
#define HC_HTXnL2WE_MASK 0x00000f00
#define HC_HTXnL3WE_MASK 0x0000f000
#define HC_HTXnL4WE_MASK 0x000f0000
#define HC_HTXnL5WE_MASK 0x00f00000
#define HC_HTXnL1WE_SHIFT 4
#define HC_HTXnL2WE_SHIFT 8
#define HC_HTXnL3WE_SHIFT 12
#define HC_HTXnL4WE_SHIFT 16
#define HC_HTXnL5WE_SHIFT 20
/* HC_SubA_HTXnL6_bWE 0x004c */
#define HC_HTXnL6WE_MASK 0x0000000f
#define HC_HTXnL7WE_MASK 0x000000f0
#define HC_HTXnL8WE_MASK 0x00000f00
#define HC_HTXnL9WE_MASK 0x0000f000
#define HC_HTXnLaWE_MASK 0x000f0000
#define HC_HTXnLbWE_MASK 0x00f00000
#define HC_HTXnL7WE_SHIFT 4
#define HC_HTXnL8WE_SHIFT 8
#define HC_HTXnL9WE_SHIFT 12
/* HC_SubA_HTXnL6_bWE 0x004c (continued) */
#define HC_HTXnLaWE_SHIFT 16
#define HC_HTXnLbWE_SHIFT 20
/* HC_SubA_HTXnLc_11WE 0x004d */
#define HC_HTXnLcWE_MASK 0x0000000f
#define HC_HTXnLdWE_MASK 0x000000f0
#define HC_HTXnLeWE_MASK 0x00000f00
#define HC_HTXnLfWE_MASK 0x0000f000
#define HC_HTXnL10WE_MASK 0x000f0000
#define HC_HTXnL11WE_MASK 0x00f00000
#define HC_HTXnLdWE_SHIFT 4
#define HC_HTXnLeWE_SHIFT 8
#define HC_HTXnLfWE_SHIFT 12
#define HC_HTXnL10WE_SHIFT 16
#define HC_HTXnL11WE_SHIFT 20
/* HC_SubA_HTXnL0_5HE 0x0051 */
#define HC_HTXnL0HE_MASK 0x0000000f
#define HC_HTXnL1HE_MASK 0x000000f0
#define HC_HTXnL2HE_MASK 0x00000f00
#define HC_HTXnL3HE_MASK 0x0000f000
#define HC_HTXnL4HE_MASK 0x000f0000
#define HC_HTXnL5HE_MASK 0x00f00000
#define HC_HTXnL1HE_SHIFT 4
#define HC_HTXnL2HE_SHIFT 8
#define HC_HTXnL3HE_SHIFT 12
#define HC_HTXnL4HE_SHIFT 16
#define HC_HTXnL5HE_SHIFT 20
/* HC_SubA_HTXnL6_bHE 0x0052 */
#define HC_HTXnL6HE_MASK 0x0000000f
#define HC_HTXnL7HE_MASK 0x000000f0
#define HC_HTXnL8HE_MASK 0x00000f00
#define HC_HTXnL9HE_MASK 0x0000f000
#define HC_HTXnLaHE_MASK 0x000f0000
#define HC_HTXnLbHE_MASK 0x00f00000
#define HC_HTXnL7HE_SHIFT 4
#define HC_HTXnL8HE_SHIFT 8
#define HC_HTXnL9HE_SHIFT 12
#define HC_HTXnLaHE_SHIFT 16
#define HC_HTXnLbHE_SHIFT 20
/* HC_SubA_HTXnLc_11HE 0x0053 */
#define HC_HTXnLcHE_MASK 0x0000000f
#define HC_HTXnLdHE_MASK 0x000000f0
#define HC_HTXnLeHE_MASK 0x00000f00
#define HC_HTXnLfHE_MASK 0x0000f000
#define HC_HTXnL10HE_MASK 0x000f0000
#define HC_HTXnL11HE_MASK 0x00f00000
#define HC_HTXnLdHE_SHIFT 4
#define HC_HTXnLeHE_SHIFT 8
#define HC_HTXnLfHE_SHIFT 12
#define HC_HTXnL10HE_SHIFT 16
#define HC_HTXnL11HE_SHIFT 20
/* HC_SubA_HTXnL0OS 0x0077 */
#define HC_HTXnL0OS_MASK 0x003ff000
#define HC_HTXnLVmax_MASK 0x00000fc0
#define HC_HTXnLVmin_MASK 0x0000003f
#define HC_HTXnL0OS_SHIFT 12
#define HC_HTXnLVmax_SHIFT 6
/* HC_SubA_HTXnTB 0x0078 */
#define HC_HTXnTB_MASK 0x00f00000
#define HC_HTXnFLSe_MASK 0x0000e000
#define HC_HTXnFLSs_MASK 0x00001c00
#define HC_HTXnFLTe_MASK 0x00000380
/* HC_SubA_HTXnTB 0x0078 (continued) */
#define HC_HTXnFLTs_MASK 0x00000070
#define HC_HTXnFLDs_MASK 0x0000000f
#define HC_HTXnTB_NoTB 0x00000000
#define HC_HTXnTB_TBC_S 0x00100000
#define HC_HTXnTB_TBC_T 0x00200000
#define HC_HTXnTB_TB_S 0x00400000
#define HC_HTXnTB_TB_T 0x00800000
#define HC_HTXnFLSe_Nearest 0x00000000
#define HC_HTXnFLSe_Linear 0x00002000
#define HC_HTXnFLSe_NonLinear 0x00004000
#define HC_HTXnFLSe_Sharp 0x00008000
#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
#define HC_HTXnFLSs_Nearest 0x00000000
#define HC_HTXnFLSs_Linear 0x00000400
#define HC_HTXnFLSs_NonLinear 0x00000800
#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
#define HC_HTXnFLTe_Nearest 0x00000000
#define HC_HTXnFLTe_Linear 0x00000080
#define HC_HTXnFLTe_NonLinear 0x00000100
#define HC_HTXnFLTe_Sharp 0x00000180
#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
#define HC_HTXnFLTs_Nearest 0x00000000
#define HC_HTXnFLTs_Linear 0x00000010
#define HC_HTXnFLTs_NonLinear 0x00000020
#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
#define HC_HTXnFLDs_Tex0 0x00000000
#define HC_HTXnFLDs_Nearest 0x00000001
#define HC_HTXnFLDs_Linear 0x00000002
#define HC_HTXnFLDs_NonLinear 0x00000003
#define HC_HTXnFLDs_Dither 0x00000004
#define HC_HTXnFLDs_ConstLOD 0x00000005
#define HC_HTXnFLDs_Ani 0x00000006
#define HC_HTXnFLDs_AniDither 0x00000007
/* HC_SubA_HTXnMPMD 0x0079 */
#define HC_HTXnMPMD_SMASK 0x00070000
#define HC_HTXnMPMD_TMASK 0x00380000
#define HC_HTXnLODDTf_MASK 0x00000007
#define HC_HTXnXY2ST_MASK 0x00000008
#define HC_HTXnMPMD_Tsingle 0x00000000
#define HC_HTXnMPMD_Tclamp 0x00080000
#define HC_HTXnMPMD_Trepeat 0x00100000
#define HC_HTXnMPMD_Tmirror 0x00180000
#define HC_HTXnMPMD_Twrap 0x00200000
#define HC_HTXnMPMD_Ssingle 0x00000000
#define HC_HTXnMPMD_Sclamp 0x00010000
#define HC_HTXnMPMD_Srepeat 0x00020000
#define HC_HTXnMPMD_Smirror 0x00030000
#define HC_HTXnMPMD_Swrap 0x00040000
/* HC_SubA_HTXnCLODu 0x007a */
#define HC_HTXnCLODu_MASK 0x000ffc00
#define HC_HTXnCLODd_MASK 0x000003ff
#define HC_HTXnCLODu_SHIFT 10
/* HC_SubA_HTXnFM 0x007b */
#define HC_HTXnFM_MASK 0x00ff0000
#define HC_HTXnLoc_MASK 0x00000003
#define HC_HTXnFM_INDEX 0x00000000
#define HC_HTXnFM_Intensity 0x00080000
#define HC_HTXnFM_Lum 0x00100000
#define HC_HTXnFM_Alpha 0x00180000
#define HC_HTXnFM_DX 0x00280000
#define HC_HTXnFM_ARGB16 0x00880000
#define HC_HTXnFM_ARGB32 0x00980000
#define HC_HTXnFM_ABGR16 0x00a80000
#define HC_HTXnFM_ABGR32 0x00b80000
#define HC_HTXnFM_RGBA16 0x00c80000
#define HC_HTXnFM_RGBA32 0x00d80000
#define HC_HTXnFM_BGRA16 0x00e80000
#define HC_HTXnFM_BGRA32 0x00f80000
#define HC_HTXnFM_BUMPMAP 0x00380000
#define HC_HTXnFM_Index1 (HC_HTXnFM_INDEX | 0x00000000)
#define HC_HTXnFM_Index2 (HC_HTXnFM_INDEX | 0x00010000)
#define HC_HTXnFM_Index4 (HC_HTXnFM_INDEX | 0x00020000)
#define HC_HTXnFM_Index8 (HC_HTXnFM_INDEX | 0x00030000)
#define HC_HTXnFM_T1 (HC_HTXnFM_Intensity | 0x00000000)
#define HC_HTXnFM_T2 (HC_HTXnFM_Intensity | 0x00010000)
#define HC_HTXnFM_T4 (HC_HTXnFM_Intensity | 0x00020000)
#define HC_HTXnFM_T8 (HC_HTXnFM_Intensity | 0x00030000)
#define HC_HTXnFM_L1 (HC_HTXnFM_Lum | 0x00000000)
#define HC_HTXnFM_L2 (HC_HTXnFM_Lum | 0x00010000)
#define HC_HTXnFM_L4 (HC_HTXnFM_Lum | 0x00020000)
#define HC_HTXnFM_L8 (HC_HTXnFM_Lum | 0x00030000)
#define HC_HTXnFM_AL44 (HC_HTXnFM_Lum | 0x00040000)
#define HC_HTXnFM_AL88 (HC_HTXnFM_Lum | 0x00050000)
#define HC_HTXnFM_A1 (HC_HTXnFM_Alpha | 0x00000000)
#define HC_HTXnFM_A2 (HC_HTXnFM_Alpha | 0x00010000)
#define HC_HTXnFM_A4 (HC_HTXnFM_Alpha | 0x00020000)
#define HC_HTXnFM_A8 (HC_HTXnFM_Alpha | 0x00030000)
#define HC_HTXnFM_DX1 (HC_HTXnFM_DX | 0x00010000)
#define HC_HTXnFM_DX23 (HC_HTXnFM_DX | 0x00020000)
#define HC_HTXnFM_DX45 (HC_HTXnFM_DX | 0x00030000)
#define HC_HTXnFM_RGB555 (HC_HTXnFM_ARGB16 | 0x00000000)
#define HC_HTXnFM_RGB565 (HC_HTXnFM_ARGB16 | 0x00010000)
#define HC_HTXnFM_ARGB1555 (HC_HTXnFM_ARGB16 | 0x00020000)
#define HC_HTXnFM_ARGB4444 (HC_HTXnFM_ARGB16 | 0x00030000)
#define HC_HTXnFM_ARGB0888 (HC_HTXnFM_ARGB32 | 0x00000000)
#define HC_HTXnFM_ARGB8888 (HC_HTXnFM_ARGB32 | 0x00010000) #define HC_HTXnFM_BGR555 (HC_HTXnFM_ABGR16 | 0x00000000) #define HC_HTXnFM_BGR565 (HC_HTXnFM_ABGR16 | 0x00010000) #define HC_HTXnFM_ABGR1555 (HC_HTXnFM_ABGR16 | 0x00020000) #define HC_HTXnFM_ABGR4444 (HC_HTXnFM_ABGR16 | 0x00030000) #define HC_HTXnFM_ABGR0888 (HC_HTXnFM_ABGR32 | 0x00000000) #define HC_HTXnFM_ABGR8888 (HC_HTXnFM_ABGR32 | 0x00010000) #define HC_HTXnFM_RGBA5550 (HC_HTXnFM_RGBA16 | 0x00000000) #define HC_HTXnFM_RGBA5551 (HC_HTXnFM_RGBA16 | 0x00020000) #define HC_HTXnFM_RGBA4444 (HC_HTXnFM_RGBA16 | 0x00030000) #define HC_HTXnFM_RGBA8880 (HC_HTXnFM_RGBA32 | 0x00000000) #define HC_HTXnFM_RGBA8888 (HC_HTXnFM_RGBA32 | 0x00010000) #define HC_HTXnFM_BGRA5550 (HC_HTXnFM_BGRA16 | 0x00000000) #define HC_HTXnFM_BGRA5551 (HC_HTXnFM_BGRA16 | 0x00020000) #define HC_HTXnFM_BGRA4444 (HC_HTXnFM_BGRA16 | 0x00030000) #define HC_HTXnFM_BGRA8880 (HC_HTXnFM_BGRA32 | 0x00000000) #define HC_HTXnFM_BGRA8888 (HC_HTXnFM_BGRA32 | 0x00010000) #define HC_HTXnFM_VU88 (HC_HTXnFM_BUMPMAP | 0x00000000) #define HC_HTXnFM_LVU655 (HC_HTXnFM_BUMPMAP | 0x00010000) #define HC_HTXnFM_LVU888 (HC_HTXnFM_BUMPMAP | 0x00020000) #define HC_HTXnLoc_Local 0x00000000 #define HC_HTXnLoc_Sys 0x00000002 #define HC_HTXnLoc_AGP 0x00000003 /* HC_SubA_HTXnTRAH 0x007f */ #define HC_HTXnTRAH_MASK 0x00ff0000 #define HC_HTXnTRAL_MASK 0x0000ff00 #define HC_HTXnTBA_MASK 0x000000ff #define HC_HTXnTRAH_SHIFT 16 #define HC_HTXnTRAL_SHIFT 8 /* HC_SubA_HTXnTBLCsat 0x0080 *-- Define the input texture. 
*/ #define HC_XTC_TOPC 0x00000000 #define HC_XTC_InvTOPC 0x00000010 #define HC_XTC_TOPCp5 0x00000020 #define HC_XTC_Cbias 0x00000000 #define HC_XTC_InvCbias 0x00000010 #define HC_XTC_0 0x00000000 #define HC_XTC_Dif 0x00000001 #define HC_XTC_Spec 0x00000002 #define HC_XTC_Tex 0x00000003 #define HC_XTC_Cur 0x00000004 #define HC_XTC_Adif 0x00000005 #define HC_XTC_Fog 0x00000006 #define HC_XTC_Atex 0x00000007 #define HC_XTC_Acur 0x00000008 #define HC_XTC_HTXnTBLRC 0x00000009 #define HC_XTC_Ctexnext 0x0000000a /*-- */ #define HC_HTXnTBLCsat_MASK 0x00800000 #define HC_HTXnTBLCa_MASK 0x000fc000 #define HC_HTXnTBLCb_MASK 0x00001f80 #define HC_HTXnTBLCc_MASK 0x0000003f #define HC_HTXnTBLCa_TOPC (HC_XTC_TOPC << 14) #define HC_HTXnTBLCa_InvTOPC (HC_XTC_InvTOPC << 14) #define HC_HTXnTBLCa_TOPCp5 (HC_XTC_TOPCp5 << 14) #define HC_HTXnTBLCa_0 (HC_XTC_0 << 14) #define HC_HTXnTBLCa_Dif (HC_XTC_Dif << 14) #define HC_HTXnTBLCa_Spec (HC_XTC_Spec << 14) #define HC_HTXnTBLCa_Tex (HC_XTC_Tex << 14) #define HC_HTXnTBLCa_Cur (HC_XTC_Cur << 14) #define HC_HTXnTBLCa_Adif (HC_XTC_Adif << 14) #define HC_HTXnTBLCa_Fog (HC_XTC_Fog << 14) #define HC_HTXnTBLCa_Atex (HC_XTC_Atex << 14) #define HC_HTXnTBLCa_Acur (HC_XTC_Acur << 14) #define HC_HTXnTBLCa_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14) #define HC_HTXnTBLCa_Ctexnext (HC_XTC_Ctexnext << 14) #define HC_HTXnTBLCb_TOPC (HC_XTC_TOPC << 7) #define HC_HTXnTBLCb_InvTOPC (HC_XTC_InvTOPC << 7) #define HC_HTXnTBLCb_TOPCp5 (HC_XTC_TOPCp5 << 7) #define HC_HTXnTBLCb_0 (HC_XTC_0 << 7) #define HC_HTXnTBLCb_Dif (HC_XTC_Dif << 7) #define HC_HTXnTBLCb_Spec (HC_XTC_Spec << 7) #define HC_HTXnTBLCb_Tex (HC_XTC_Tex << 7) #define HC_HTXnTBLCb_Cur (HC_XTC_Cur << 7) #define HC_HTXnTBLCb_Adif (HC_XTC_Adif << 7) #define HC_HTXnTBLCb_Fog (HC_XTC_Fog << 7) #define HC_HTXnTBLCb_Atex (HC_XTC_Atex << 7) #define HC_HTXnTBLCb_Acur (HC_XTC_Acur << 7) #define HC_HTXnTBLCb_HTXnTBLRC (HC_XTC_HTXnTBLRC << 7) #define HC_HTXnTBLCb_Ctexnext (HC_XTC_Ctexnext << 7) #define HC_HTXnTBLCc_TOPC 
(HC_XTC_TOPC << 0) #define HC_HTXnTBLCc_InvTOPC (HC_XTC_InvTOPC << 0) #define HC_HTXnTBLCc_TOPCp5 (HC_XTC_TOPCp5 << 0) #define HC_HTXnTBLCc_0 (HC_XTC_0 << 0) #define HC_HTXnTBLCc_Dif (HC_XTC_Dif << 0) #define HC_HTXnTBLCc_Spec (HC_XTC_Spec << 0) #define HC_HTXnTBLCc_Tex (HC_XTC_Tex << 0) #define HC_HTXnTBLCc_Cur (HC_XTC_Cur << 0) #define HC_HTXnTBLCc_Adif (HC_XTC_Adif << 0) #define HC_HTXnTBLCc_Fog (HC_XTC_Fog << 0) #define HC_HTXnTBLCc_Atex (HC_XTC_Atex << 0) #define HC_HTXnTBLCc_Acur (HC_XTC_Acur << 0) #define HC_HTXnTBLCc_HTXnTBLRC (HC_XTC_HTXnTBLRC << 0) #define HC_HTXnTBLCc_Ctexnext (HC_XTC_Ctexnext << 0) /* HC_SubA_HTXnTBLCop 0x0081 */ #define HC_HTXnTBLdot_MASK 0x00c00000 #define HC_HTXnTBLCop_MASK 0x00380000 #define HC_HTXnTBLCbias_MASK 0x0007c000 #define HC_HTXnTBLCshift_MASK 0x00001800 #define HC_HTXnTBLAop_MASK 0x00000380 #define HC_HTXnTBLAbias_MASK 0x00000078 #define HC_HTXnTBLAshift_MASK 0x00000003 #define HC_HTXnTBLCop_Add 0x00000000 #define HC_HTXnTBLCop_Sub 0x00080000 #define HC_HTXnTBLCop_Min 0x00100000 #define HC_HTXnTBLCop_Max 0x00180000 #define HC_HTXnTBLCop_Mask 0x00200000 #define HC_HTXnTBLCbias_Cbias (HC_XTC_Cbias << 14) #define HC_HTXnTBLCbias_InvCbias (HC_XTC_InvCbias << 14) #define HC_HTXnTBLCbias_0 (HC_XTC_0 << 14) #define HC_HTXnTBLCbias_Dif (HC_XTC_Dif << 14) #define HC_HTXnTBLCbias_Spec (HC_XTC_Spec << 14) #define HC_HTXnTBLCbias_Tex (HC_XTC_Tex << 14) #define HC_HTXnTBLCbias_Cur (HC_XTC_Cur << 14) #define HC_HTXnTBLCbias_Adif (HC_XTC_Adif << 14) #define HC_HTXnTBLCbias_Fog (HC_XTC_Fog << 14) #define HC_HTXnTBLCbias_Atex (HC_XTC_Atex << 14) #define HC_HTXnTBLCbias_Acur (HC_XTC_Acur << 14) #define HC_HTXnTBLCbias_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14) #define HC_HTXnTBLCshift_1 0x00000000 #define HC_HTXnTBLCshift_2 0x00000800 #define HC_HTXnTBLCshift_No 0x00001000 #define HC_HTXnTBLCshift_DotP 0x00001800 /*=* John Sheng [2003.7.18] texture combine *=*/ #define HC_HTXnTBLDOT3 0x00080000 #define HC_HTXnTBLDOT4 0x000C0000 #define 
HC_HTXnTBLAop_Add 0x00000000 #define HC_HTXnTBLAop_Sub 0x00000080 #define HC_HTXnTBLAop_Min 0x00000100 #define HC_HTXnTBLAop_Max 0x00000180 #define HC_HTXnTBLAop_Mask 0x00000200 #define HC_HTXnTBLAbias_Inv 0x00000040 #define HC_HTXnTBLAbias_Adif 0x00000000 #define HC_HTXnTBLAbias_Fog 0x00000008 #define HC_HTXnTBLAbias_Acur 0x00000010 #define HC_HTXnTBLAbias_HTXnTBLRAbias 0x00000018 #define HC_HTXnTBLAbias_Atex 0x00000020 #define HC_HTXnTBLAshift_1 0x00000000 #define HC_HTXnTBLAshift_2 0x00000001 #define HC_HTXnTBLAshift_No 0x00000002 /* #define HC_HTXnTBLAshift_DotP 0x00000003 */ /* HC_SubA_HTXnTBLMPFog 0x0082 */ #define HC_HTXnTBLMPfog_MASK 0x00e00000 #define HC_HTXnTBLMPfog_0 0x00000000 #define HC_HTXnTBLMPfog_Adif 0x00200000 #define HC_HTXnTBLMPfog_Fog 0x00400000 #define HC_HTXnTBLMPfog_Atex 0x00600000 #define HC_HTXnTBLMPfog_Acur 0x00800000 #define HC_HTXnTBLMPfog_GHTXnTBLRFog 0x00a00000 /* HC_SubA_HTXnTBLAsat 0x0083 *-- Define the texture alpha input. */ #define HC_XTA_TOPA 0x00000000 #define HC_XTA_InvTOPA 0x00000008 #define HC_XTA_TOPAp5 0x00000010 #define HC_XTA_Adif 0x00000000 #define HC_XTA_Fog 0x00000001 #define HC_XTA_Acur 0x00000002 #define HC_XTA_HTXnTBLRA 0x00000003 #define HC_XTA_Atex 0x00000004 #define HC_XTA_Atexnext 0x00000005 /*-- */ #define HC_HTXnTBLAsat_MASK 0x00800000 #define HC_HTXnTBLAMB_MASK 0x00700000 #define HC_HTXnTBLAa_MASK 0x0007c000 #define HC_HTXnTBLAb_MASK 0x00000f80 #define HC_HTXnTBLAc_MASK 0x0000001f #define HC_HTXnTBLAMB_SHIFT 20 #define HC_HTXnTBLAa_TOPA (HC_XTA_TOPA << 14) #define HC_HTXnTBLAa_InvTOPA (HC_XTA_InvTOPA << 14) #define HC_HTXnTBLAa_TOPAp5 (HC_XTA_TOPAp5 << 14) #define HC_HTXnTBLAa_Adif (HC_XTA_Adif << 14) #define HC_HTXnTBLAa_Fog (HC_XTA_Fog << 14) #define HC_HTXnTBLAa_Acur (HC_XTA_Acur << 14) #define HC_HTXnTBLAa_HTXnTBLRA (HC_XTA_HTXnTBLRA << 14) #define HC_HTXnTBLAa_Atex (HC_XTA_Atex << 14) #define HC_HTXnTBLAa_Atexnext (HC_XTA_Atexnext << 14) #define HC_HTXnTBLAb_TOPA (HC_XTA_TOPA << 7) #define 
HC_HTXnTBLAb_InvTOPA (HC_XTA_InvTOPA << 7) #define HC_HTXnTBLAb_TOPAp5 (HC_XTA_TOPAp5 << 7) #define HC_HTXnTBLAb_Adif (HC_XTA_Adif << 7) #define HC_HTXnTBLAb_Fog (HC_XTA_Fog << 7) #define HC_HTXnTBLAb_Acur (HC_XTA_Acur << 7) #define HC_HTXnTBLAb_HTXnTBLRA (HC_XTA_HTXnTBLRA << 7) #define HC_HTXnTBLAb_Atex (HC_XTA_Atex << 7) #define HC_HTXnTBLAb_Atexnext (HC_XTA_Atexnext << 7) #define HC_HTXnTBLAc_TOPA (HC_XTA_TOPA << 0) #define HC_HTXnTBLAc_InvTOPA (HC_XTA_InvTOPA << 0) #define HC_HTXnTBLAc_TOPAp5 (HC_XTA_TOPAp5 << 0) #define HC_HTXnTBLAc_Adif (HC_XTA_Adif << 0) #define HC_HTXnTBLAc_Fog (HC_XTA_Fog << 0) #define HC_HTXnTBLAc_Acur (HC_XTA_Acur << 0) #define HC_HTXnTBLAc_HTXnTBLRA (HC_XTA_HTXnTBLRA << 0) #define HC_HTXnTBLAc_Atex (HC_XTA_Atex << 0) #define HC_HTXnTBLAc_Atexnext (HC_XTA_Atexnext << 0) /* HC_SubA_HTXnTBLRAa 0x0089 */ #define HC_HTXnTBLRAa_MASK 0x00ff0000 #define HC_HTXnTBLRAb_MASK 0x0000ff00 #define HC_HTXnTBLRAc_MASK 0x000000ff #define HC_HTXnTBLRAa_SHIFT 16 #define HC_HTXnTBLRAb_SHIFT 8 #define HC_HTXnTBLRAc_SHIFT 0 /* HC_SubA_HTXnTBLRFog 0x008a */ #define HC_HTXnTBLRFog_MASK 0x0000ff00 #define HC_HTXnTBLRAbias_MASK 0x000000ff #define HC_HTXnTBLRFog_SHIFT 8 #define HC_HTXnTBLRAbias_SHIFT 0 /* HC_SubA_HTXnLScale 0x0094 */ #define HC_HTXnLScale_MASK 0x0007fc00 #define HC_HTXnLOff_MASK 0x000001ff #define HC_HTXnLScale_SHIFT 10 /* HC_SubA_HTXSMD 0x0000 */ #define HC_HTXSMD_MASK 0x00000080 #define HC_HTXTMD_MASK 0x00000040 #define HC_HTXNum_MASK 0x00000038 #define HC_HTXTRMD_MASK 0x00000006 #define HC_HTXCHCLR_MASK 0x00000001 #define HC_HTXNum_SHIFT 3 /* Texture Palette n */ #define HC_SubType_TexPalette0 0x00000000 #define HC_SubType_TexPalette1 0x00000001 #define HC_SubType_FogTable 0x00000010 #define HC_SubType_Stipple 0x00000014 /* HC_SubA_TexPalette0 0x0000 */ #define HC_HTPnA_MASK 0xff000000 #define HC_HTPnR_MASK 0x00ff0000 #define HC_HTPnG_MASK 0x0000ff00 #define HC_HTPnB_MASK 0x000000ff /* HC_SubA_FogTable 0x0010 */ #define HC_HFPn3_MASK 0xff000000 
#define HC_HFPn2_MASK			0x00ff0000
#define HC_HFPn1_MASK			0x0000ff00
#define HC_HFPn_MASK			0x000000ff
#define HC_HFPn3_SHIFT			24
#define HC_HFPn2_SHIFT			16
#define HC_HFPn1_SHIFT			8

/* Auto Testing & Security */
#define HC_SubA_HenFIFOAT		0x0000
#define HC_SubA_HFBDrawFirst		0x0004
#define HC_SubA_HFBBasL			0x0005
#define HC_SubA_HFBDst			0x0006

/* HC_SubA_HenFIFOAT 0x0000 */
#define HC_HenFIFOAT_MASK		0x00000020
#define HC_HenGEMILock_MASK		0x00000010
#define HC_HenFBASwap_MASK		0x00000008
#define HC_HenOT_MASK			0x00000004
#define HC_HenCMDQ_MASK			0x00000002
#define HC_HenTXCTSU_MASK		0x00000001

/* HC_SubA_HFBDrawFirst 0x0004 */
#define HC_HFBDrawFirst_MASK		0x00000800
#define HC_HFBQueue_MASK		0x00000400
#define HC_HFBLock_MASK			0x00000200
#define HC_HEOF_MASK			0x00000100
#define HC_HFBBasH_MASK			0x000000ff

/* GEMI Setting */
#define HC_SubA_HTArbRCM		0x0008
#define HC_SubA_HTArbRZ			0x000a
#define HC_SubA_HTArbWZ			0x000b
#define HC_SubA_HTArbRTX		0x000c
#define HC_SubA_HTArbRCW		0x000d
#define HC_SubA_HTArbE2			0x000e
#define HC_SubA_HArbRQCM		0x0010
#define HC_SubA_HArbWQCM		0x0011
#define HC_SubA_HGEMITout		0x0020
#define HC_SubA_HFthRTXD		0x0040
#define HC_SubA_HFthRTXA		0x0044
#define HC_SubA_HCMDQstL		0x0050
#define HC_SubA_HCMDQendL		0x0051
#define HC_SubA_HCMDQLen		0x0052

/* HC_SubA_HTArbRCM 0x0008 */
#define HC_HTArbRCM_MASK		0x0000ffff
/* HC_SubA_HTArbRZ 0x000a */
#define HC_HTArbRZ_MASK			0x0000ffff
/* HC_SubA_HTArbWZ 0x000b */
#define HC_HTArbWZ_MASK			0x0000ffff
/* HC_SubA_HTArbRTX 0x000c */
#define HC_HTArbRTX_MASK		0x0000ffff
/* HC_SubA_HTArbRCW 0x000d */
#define HC_HTArbRCW_MASK		0x0000ffff
/* HC_SubA_HTArbE2 0x000e */
#define HC_HTArbE2_MASK			0x0000ffff
/* HC_SubA_HArbRQCM 0x0010 */
/* NOTE(review): spelled HTArbRQCM although the sub-address macro above is
 * HC_SubA_HArbRQCM; kept as-is since external code may use this spelling. */
#define HC_HTArbRQCM_MASK		0x0000ffff
/* HC_SubA_HArbWQCM 0x0011 */
#define HC_HArbWQCM_MASK		0x0000ffff
/* HC_SubA_HGEMITout 0x0020 */
#define HC_HGEMITout_MASK		0x000f0000
#define HC_HNPArbZC_MASK		0x0000ffff
#define HC_HGEMITout_SHIFT		16
/* HC_SubA_HFthRTXD 0x0040 */
#define HC_HFthRTXD_MASK		0x00ff0000
#define HC_HFthRZD_MASK			0x0000ff00
#define HC_HFthWZD_MASK			0x000000ff
#define HC_HFthRTXD_SHIFT		16
#define HC_HFthRZD_SHIFT		8
/* HC_SubA_HFthRTXA 0x0044 */
#define HC_HFthRTXA_MASK		0x000000ff

/******************************************************************************
** Define the Halcyon Internal register access constants. For simulator only.
******************************************************************************/
#define HC_SIMA_HAGPBstL		0x0000
#define HC_SIMA_HAGPBendL		0x0001
#define HC_SIMA_HAGPCMNT		0x0002
#define HC_SIMA_HAGPBpL			0x0003
#define HC_SIMA_HAGPBpH			0x0004
#define HC_SIMA_HClipTB			0x0005
#define HC_SIMA_HClipLR			0x0006
#define HC_SIMA_HFPClipTL		0x0007
#define HC_SIMA_HFPClipBL		0x0008
#define HC_SIMA_HFPClipLL		0x0009
#define HC_SIMA_HFPClipRL		0x000a
#define HC_SIMA_HFPClipTBH		0x000b
#define HC_SIMA_HFPClipLRH		0x000c
#define HC_SIMA_HLP			0x000d
#define HC_SIMA_HLPRF			0x000e
#define HC_SIMA_HSolidCL		0x000f
#define HC_SIMA_HPixGC			0x0010
#define HC_SIMA_HSPXYOS			0x0011
#define HC_SIMA_HCmdA			0x0012
#define HC_SIMA_HCmdB			0x0013
#define HC_SIMA_HEnable			0x0014
#define HC_SIMA_HZWBBasL		0x0015
#define HC_SIMA_HZWBBasH		0x0016
#define HC_SIMA_HZWBType		0x0017
#define HC_SIMA_HZBiasL			0x0018
#define HC_SIMA_HZWBend			0x0019
#define HC_SIMA_HZWTMD			0x001a
#define HC_SIMA_HZWCDL			0x001b
#define HC_SIMA_HZWCTAGnum		0x001c
#define HC_SIMA_HZCYNum			0x001d
#define HC_SIMA_HZWCFire		0x001e
/* #define HC_SIMA_HSBBasL 0x001d */
/* #define HC_SIMA_HSBBasH 0x001e */
/* #define HC_SIMA_HSBFM 0x001f */
#define HC_SIMA_HSTREF			0x0020
#define HC_SIMA_HSTMD			0x0021
#define HC_SIMA_HABBasL			0x0022
#define HC_SIMA_HABBasH			0x0023
#define HC_SIMA_HABFM			0x0024
#define HC_SIMA_HATMD			0x0025
#define HC_SIMA_HABLCsat		0x0026
#define HC_SIMA_HABLCop			0x0027
#define HC_SIMA_HABLAsat		0x0028
#define HC_SIMA_HABLAop			0x0029
#define HC_SIMA_HABLRCa			0x002a
#define HC_SIMA_HABLRFCa		0x002b
#define HC_SIMA_HABLRCbias		0x002c
#define HC_SIMA_HABLRCb			0x002d
#define HC_SIMA_HABLRFCb		0x002e
#define HC_SIMA_HABLRAa			0x002f
#define HC_SIMA_HABLRAb			0x0030
#define HC_SIMA_HDBBasL			0x0031
#define HC_SIMA_HDBBasH			0x0032
#define HC_SIMA_HDBFM			0x0033
#define HC_SIMA_HFBBMSKL		0x0034
#define HC_SIMA_HROP			0x0035
#define HC_SIMA_HFogLF			0x0036
#define HC_SIMA_HFogCL			0x0037
#define HC_SIMA_HFogCH			0x0038
#define HC_SIMA_HFogStL			0x0039
#define HC_SIMA_HFogStH			0x003a
#define HC_SIMA_HFogOOdMF		0x003b
#define HC_SIMA_HFogOOdEF		0x003c
#define HC_SIMA_HFogEndL		0x003d
#define HC_SIMA_HFogDenst		0x003e
/*---- start of texture 0 setting ----
 */
#define HC_SIMA_HTX0L0BasL		0x0040
#define HC_SIMA_HTX0L1BasL		0x0041
#define HC_SIMA_HTX0L2BasL		0x0042
#define HC_SIMA_HTX0L3BasL		0x0043
#define HC_SIMA_HTX0L4BasL		0x0044
#define HC_SIMA_HTX0L5BasL		0x0045
#define HC_SIMA_HTX0L6BasL		0x0046
#define HC_SIMA_HTX0L7BasL		0x0047
#define HC_SIMA_HTX0L8BasL		0x0048
#define HC_SIMA_HTX0L9BasL		0x0049
#define HC_SIMA_HTX0LaBasL		0x004a
#define HC_SIMA_HTX0LbBasL		0x004b
#define HC_SIMA_HTX0LcBasL		0x004c
#define HC_SIMA_HTX0LdBasL		0x004d
#define HC_SIMA_HTX0LeBasL		0x004e
#define HC_SIMA_HTX0LfBasL		0x004f
#define HC_SIMA_HTX0L10BasL		0x0050
#define HC_SIMA_HTX0L11BasL		0x0051
#define HC_SIMA_HTX0L012BasH		0x0052
#define HC_SIMA_HTX0L345BasH		0x0053
#define HC_SIMA_HTX0L678BasH		0x0054
#define HC_SIMA_HTX0L9abBasH		0x0055
#define HC_SIMA_HTX0LcdeBasH		0x0056
#define HC_SIMA_HTX0Lf1011BasH		0x0057
#define HC_SIMA_HTX0L0Pit		0x0058
#define HC_SIMA_HTX0L1Pit		0x0059
#define HC_SIMA_HTX0L2Pit		0x005a
#define HC_SIMA_HTX0L3Pit		0x005b
#define HC_SIMA_HTX0L4Pit		0x005c
#define HC_SIMA_HTX0L5Pit		0x005d
#define HC_SIMA_HTX0L6Pit		0x005e
#define HC_SIMA_HTX0L7Pit		0x005f
#define HC_SIMA_HTX0L8Pit		0x0060
#define HC_SIMA_HTX0L9Pit		0x0061
#define HC_SIMA_HTX0LaPit		0x0062
#define HC_SIMA_HTX0LbPit		0x0063
#define HC_SIMA_HTX0LcPit		0x0064
#define HC_SIMA_HTX0LdPit		0x0065
#define HC_SIMA_HTX0LePit		0x0066
#define HC_SIMA_HTX0LfPit		0x0067
#define HC_SIMA_HTX0L10Pit		0x0068
#define HC_SIMA_HTX0L11Pit		0x0069
#define HC_SIMA_HTX0L0_5WE		0x006a
#define HC_SIMA_HTX0L6_bWE		0x006b
#define HC_SIMA_HTX0Lc_11WE		0x006c
#define HC_SIMA_HTX0L0_5HE		0x006d
#define HC_SIMA_HTX0L6_bHE		0x006e
#define HC_SIMA_HTX0Lc_11HE		0x006f
#define HC_SIMA_HTX0L0OS		0x0070
#define HC_SIMA_HTX0TB			0x0071
#define HC_SIMA_HTX0MPMD		0x0072
#define HC_SIMA_HTX0CLODu		0x0073
#define HC_SIMA_HTX0FM			0x0074
#define HC_SIMA_HTX0TRCH		0x0075
#define HC_SIMA_HTX0TRCL		0x0076
#define HC_SIMA_HTX0TBC			0x0077
#define HC_SIMA_HTX0TRAH		0x0078
#define HC_SIMA_HTX0TBLCsat		0x0079
#define HC_SIMA_HTX0TBLCop		0x007a
#define HC_SIMA_HTX0TBLMPfog		0x007b
#define HC_SIMA_HTX0TBLAsat		0x007c
#define HC_SIMA_HTX0TBLRCa		0x007d
#define HC_SIMA_HTX0TBLRCb		0x007e
#define HC_SIMA_HTX0TBLRCc		0x007f
#define HC_SIMA_HTX0TBLRCbias		0x0080
#define HC_SIMA_HTX0TBLRAa		0x0081
#define HC_SIMA_HTX0TBLRFog		0x0082
#define HC_SIMA_HTX0BumpM00		0x0083
#define HC_SIMA_HTX0BumpM01		0x0084
#define HC_SIMA_HTX0BumpM10		0x0085
#define HC_SIMA_HTX0BumpM11		0x0086
#define HC_SIMA_HTX0LScale		0x0087
/*---- end of texture 0 setting ---- 0x008f
 */
#define HC_SIMA_TX0TX1_OFF		0x0050
/*---- start of texture 1 setting ----
 * (texture 1 registers mirror texture 0 at a fixed offset)
 */
#define HC_SIMA_HTX1L0BasL	(HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L1BasL	(HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L2BasL	(HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L3BasL	(HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L4BasL	(HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L5BasL	(HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L6BasL	(HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L7BasL	(HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L8BasL	(HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L9BasL	(HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LaBasL	(HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LbBasL	(HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LcBasL	(HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LdBasL	(HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LeBasL	(HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LfBasL	(HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L10BasL	(HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L11BasL	(HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L012BasH	(HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L345BasH	(HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L678BasH	(HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L9abBasH	(HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LcdeBasH	(HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1Lf1011BasH	(HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L0Pit	(HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L1Pit	(HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L2Pit	(HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L3Pit	(HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L4Pit	(HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L5Pit	(HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L6Pit	(HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L7Pit	(HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L8Pit	(HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L9Pit	(HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LaPit	(HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LbPit	(HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LcPit	(HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LdPit	(HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LePit	(HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LfPit	(HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L10Pit	(HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L11Pit	(HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1Lc_11HE (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1BumpM10 
(HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF) #define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF) /*---- end of texture 1 setting ---- 0xaf */ #define HC_SIMA_HTXSMD 0x00b0 #define HC_SIMA_HenFIFOAT 0x00b1 #define HC_SIMA_HFBDrawFirst 0x00b2 #define HC_SIMA_HFBBasL 0x00b3 #define HC_SIMA_HTArbRCM 0x00b4 #define HC_SIMA_HTArbRZ 0x00b5 #define HC_SIMA_HTArbWZ 0x00b6 #define HC_SIMA_HTArbRTX 0x00b7 #define HC_SIMA_HTArbRCW 0x00b8 #define HC_SIMA_HTArbE2 0x00b9 #define HC_SIMA_HGEMITout 0x00ba #define HC_SIMA_HFthRTXD 0x00bb #define HC_SIMA_HFthRTXA 0x00bc /* Define the texture palette 0 */ #define HC_SIMA_HTP0 0x0100 #define HC_SIMA_HTP1 0x0200 #define HC_SIMA_FOGTABLE 0x0300 #define HC_SIMA_STIPPLE 0x0400 #define HC_SIMA_HE3Fire 0x0440 #define HC_SIMA_TRANS_SET 0x0441 #define HC_SIMA_HREngSt 0x0442 #define HC_SIMA_HRFIFOempty 0x0443 #define HC_SIMA_HRFIFOfull 0x0444 #define HC_SIMA_HRErr 0x0445 #define HC_SIMA_FIFOstatus 0x0446 /****************************************************************************** ** Define the AGP command header. 
******************************************************************************/ #define HC_ACMD_MASK 0xfe000000 #define HC_ACMD_SUB_MASK 0x0c000000 #define HC_ACMD_HCmdA 0xee000000 #define HC_ACMD_HCmdB 0xec000000 #define HC_ACMD_HCmdC 0xea000000 #define HC_ACMD_H1 0xf0000000 #define HC_ACMD_H2 0xf2000000 #define HC_ACMD_H3 0xf4000000 #define HC_ACMD_H4 0xf6000000 #define HC_ACMD_H1IO_MASK 0x000001ff #define HC_ACMD_H2IO1_MASK 0x001ff000 #define HC_ACMD_H2IO2_MASK 0x000001ff #define HC_ACMD_H2IO1_SHIFT 12 #define HC_ACMD_H2IO2_SHIFT 0 #define HC_ACMD_H3IO_MASK 0x000001ff #define HC_ACMD_H3COUNT_MASK 0x01fff000 #define HC_ACMD_H3COUNT_SHIFT 12 #define HC_ACMD_H4ID_MASK 0x000001ff #define HC_ACMD_H4COUNT_MASK 0x01fffe00 #define HC_ACMD_H4COUNT_SHIFT 9 /******************************************************************************** ** Define Header ********************************************************************************/ #define HC_HEADER2 0xF210F110 /******************************************************************************** ** Define Dummy Value ********************************************************************************/ #define HC_DUMMY 0xCCCCCCCC /******************************************************************************** ** Define for DMA use ********************************************************************************/ #define HALCYON_HEADER2 0XF210F110 #define HALCYON_FIRECMD 0XEE100000 #define HALCYON_FIREMASK 0XFFF00000 #define HALCYON_CMDB 0XEC000000 #define HALCYON_CMDBMASK 0XFFFE0000 #define HALCYON_SUB_ADDR0 0X00000000 #define HALCYON_HEADER1MASK 0XFFFFFC00 #define HALCYON_HEADER1 0XF0000000 #define HC_SubA_HAGPBstL 0x0060 #define HC_SubA_HAGPBendL 0x0061 #define HC_SubA_HAGPCMNT 0x0062 #define HC_SubA_HAGPBpL 0x0063 #define HC_SubA_HAGPBpH 0x0064 #define HC_HAGPCMNT_MASK 0x00800000 #define HC_HCmdErrClr_MASK 0x00400000 #define HC_HAGPBendH_MASK 0x0000ff00 #define HC_HAGPBstH_MASK 0x000000ff #define HC_HAGPBendH_SHIFT 8 #define 
HC_HAGPBstH_SHIFT 0 #define HC_HAGPBpL_MASK 0x00fffffc #define HC_HAGPBpID_MASK 0x00000003 #define HC_HAGPBpID_PAUSE 0x00000000 #define HC_HAGPBpID_JUMP 0x00000001 #define HC_HAGPBpID_STOP 0x00000002 #define HC_HAGPBpH_MASK 0x00ffffff #define VIA_VIDEO_HEADER5 0xFE040000 #define VIA_VIDEO_HEADER6 0xFE050000 #define VIA_VIDEO_HEADER7 0xFE060000 #define VIA_VIDEOMASK 0xFFFF0000 #endif ='#n1846'>1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 
2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 
2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 
2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257
/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
/*
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *    Kevin E. Martin <martin@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/* ================================================================
 * Helper functions for client state checking and fixup
 */

/* Validate, and if necessary relocate, a 32-bit card-space offset that a
 * userland client handed us.
 *
 * On success returns 0 with *offset possibly rewritten to a valid card
 * address; returns -EINVAL when the offset cannot be made to land in the
 * framebuffer or the GART-mapped aperture.
 */
static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
						    dev_priv,
						    struct drm_file *file_priv,
						    u32 * offset)
{
	u64 off = *offset;	/* widened to 64 bits so the fixups below cannot wrap */
	u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
	struct drm_radeon_driver_file_fields *radeon_priv;

	/* Hrm ... the story of the offset ... So this function converts
	 * the various ideas of what userland clients might have for an
	 * offset in the card address space into an offset into the card
	 * address space :) So with a sane client, it should just keep
	 * the value intact and just do some boundary checking. However,
	 * not all clients are sane. Some older clients pass us 0 based
	 * offsets relative to the start of the framebuffer and some may
	 * assume the AGP aperture it appended to the framebuffer, so we
	 * try to detect those cases and fix them up.
	 *
	 * Note: It might be a good idea here to make sure the offset lands
	 * in some "allowed" area to protect things like the PCIE GART...
	 */

	/* First, the best case, the offset already lands in either the
	 * framebuffer or the GART mapped space
	 */
	if (radeon_check_offset(dev_priv, off))
		return 0;

	/* Ok, that didn't happen... now check if we have a zero based
	 * offset that fits in the framebuffer + gart space, apply the
	 * magic offset we get from SETPARAM or calculated from fb_location
	 */
	if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
		radeon_priv = file_priv->driver_priv;
		off += radeon_priv->radeon_fb_delta;
	}

	/* Finally, assume we aimed at a GART offset if beyond the fb */
	if (off > fb_end)
		off = off - fb_end - 1 + dev_priv->gart_vm_start;

	/* Now recheck and fail if out of bounds: the fixups above are
	 * heuristics, so the result must still pass the real range check. */
	if (radeon_check_offset(dev_priv, off)) {
		DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
		*offset = off;
		return 0;
	}
	return -EINVAL;
}

/* Validate (and fix up where needed) the payload of one client state
 * packet, identified by its RADEON_EMIT_* / R200_EMIT_* id.  Every GPU
 * memory offset the packet carries is run through
 * radeon_check_and_fixup_offset(); packets that carry no offsets are
 * accepted unchanged, and unknown ids are rejected.
 *
 * Returns 0 on success, -EINVAL on a bad offset or unknown packet id.
 */
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
						     dev_priv,
						     struct drm_file *file_priv,
						     int id, u32 *data)
{
	switch (id) {

	case RADEON_EMIT_PP_MISC:
		/* data[] holds consecutive register dwords starting at
		 * RADEON_PP_MISC, so the register delta / 4 indexes the
		 * depth-offset dword. */
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
		    &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
			DRM_ERROR("Invalid depth buffer offset\n");
			return -EINVAL;
		}
		break;

	case RADEON_EMIT_PP_CNTL:
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
		    &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
			DRM_ERROR("Invalid colour buffer offset\n");
			return -EINVAL;
		}
		break;

	case R200_EMIT_PP_TXOFFSET_0:
	case R200_EMIT_PP_TXOFFSET_1:
	case R200_EMIT_PP_TXOFFSET_2:
	case R200_EMIT_PP_TXOFFSET_3:
	case R200_EMIT_PP_TXOFFSET_4:
	case R200_EMIT_PP_TXOFFSET_5:
		/* R200 texture-offset packets carry the offset as the first
		 * (and only) payload dword. */
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &data[0])) {
			DRM_ERROR("Invalid R200 texture offset\n");
			return -EINVAL;
		}
		break;

	case RADEON_EMIT_PP_TXFILTER_0:
	case RADEON_EMIT_PP_TXFILTER_1:
	case RADEON_EMIT_PP_TXFILTER_2:
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
		    &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
			DRM_ERROR("Invalid R100 texture offset\n");
			return -EINVAL;
		}
		break;

	case R200_EMIT_PP_CUBIC_OFFSETS_0:
	case R200_EMIT_PP_CUBIC_OFFSETS_1:
	case R200_EMIT_PP_CUBIC_OFFSETS_2:
	case R200_EMIT_PP_CUBIC_OFFSETS_3:
	case R200_EMIT_PP_CUBIC_OFFSETS_4:
	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
			int i;
			/* five cubic-face offsets per texture unit */
			for (i = 0; i < 5; i++) {
				if (radeon_check_and_fixup_offset(dev_priv,
								  file_priv,
								  &data[i])) {
					DRM_ERROR
					    ("Invalid R200 cubic texture offset\n");
					return -EINVAL;
				}
			}
			break;
		}

	case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
			int i;
			/* five cubic-face offsets per texture unit */
			for (i = 0; i < 5; i++) {
				if (radeon_check_and_fixup_offset(dev_priv,
								  file_priv,
								  &data[i])) {
					DRM_ERROR
					    ("Invalid R100 cubic texture offset\n");
					return -EINVAL;
				}
			}
		}
		break;

	case R200_EMIT_VAP_CTL: {
			/* NOTE(review): a TCL state flush is emitted before
			 * the VAP_CTL packet goes out — presumably a hardware
			 * requirement/workaround; confirm against R200 docs. */
			RING_LOCALS;
			BEGIN_RING(2);
			OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
			ADVANCE_RING();
		}
		break;

	case RADEON_EMIT_RB3D_COLORPITCH:
	case RADEON_EMIT_RE_LINE_PATTERN:
	case RADEON_EMIT_SE_LINE_WIDTH:
	case RADEON_EMIT_PP_LUM_MATRIX:
	case RADEON_EMIT_PP_ROT_MATRIX_0:
	case RADEON_EMIT_RB3D_STENCILREFMASK:
	case RADEON_EMIT_SE_VPORT_XSCALE:
	case RADEON_EMIT_SE_CNTL:
	case RADEON_EMIT_SE_CNTL_STATUS:
	case RADEON_EMIT_RE_MISC:
	case RADEON_EMIT_PP_BORDER_COLOR_0:
	case RADEON_EMIT_PP_BORDER_COLOR_1:
	case RADEON_EMIT_PP_BORDER_COLOR_2:
	case RADEON_EMIT_SE_ZBIAS_FACTOR:
	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
	case R200_EMIT_PP_TXCBLEND_0:
	case R200_EMIT_PP_TXCBLEND_1:
	case R200_EMIT_PP_TXCBLEND_2:
	case R200_EMIT_PP_TXCBLEND_3:
	case R200_EMIT_PP_TXCBLEND_4:
	case R200_EMIT_PP_TXCBLEND_5:
	case R200_EMIT_PP_TXCBLEND_6:
	case R200_EMIT_PP_TXCBLEND_7:
	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
	case R200_EMIT_TFACTOR_0:
	case R200_EMIT_VTX_FMT_0:
	case R200_EMIT_MATRIX_SELECT_0:
	case R200_EMIT_TEX_PROC_CTL_2:
	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
	case R200_EMIT_PP_TXFILTER_0:
	case R200_EMIT_PP_TXFILTER_1:
	case R200_EMIT_PP_TXFILTER_2:
	case R200_EMIT_PP_TXFILTER_3:
	case R200_EMIT_PP_TXFILTER_4:
	case R200_EMIT_PP_TXFILTER_5:
	case R200_EMIT_VTE_CNTL:
	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
	case R200_EMIT_PP_TAM_DEBUG3:
	case R200_EMIT_PP_CNTL_X:
	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
	case R200_EMIT_RE_SCISSOR_TL_0:
	case R200_EMIT_RE_SCISSOR_TL_1:
	case R200_EMIT_RE_SCISSOR_TL_2:
	case R200_EMIT_SE_VAP_CNTL_STATUS:
	case R200_EMIT_SE_VTX_STATE_CNTL:
	case R200_EMIT_RE_POINTSIZE:
	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
	case R200_EMIT_PP_CUBIC_FACES_0:
	case R200_EMIT_PP_CUBIC_FACES_1:
	case R200_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_CUBIC_FACES_3:
	case R200_EMIT_PP_CUBIC_FACES_4:
	case R200_EMIT_PP_CUBIC_FACES_5:
	case RADEON_EMIT_PP_TEX_SIZE_0:
	case RADEON_EMIT_PP_TEX_SIZE_1:
	case RADEON_EMIT_PP_TEX_SIZE_2:
	case R200_EMIT_RB3D_BLENDCOLOR:
	case R200_EMIT_TCL_POINT_SPRITE_CNTL:
	case RADEON_EMIT_PP_CUBIC_FACES_0:
	case RADEON_EMIT_PP_CUBIC_FACES_1:
	case RADEON_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_TRI_PERF_CNTL:
	case R200_EMIT_PP_AFS_0:
	case R200_EMIT_PP_AFS_1:
	case R200_EMIT_ATF_TFACTOR:
	case R200_EMIT_PP_TXCTLALL_0:
	case R200_EMIT_PP_TXCTLALL_1:
	case R200_EMIT_PP_TXCTLALL_2:
	case R200_EMIT_PP_TXCTLALL_3:
	case R200_EMIT_PP_TXCTLALL_4:
	case R200_EMIT_PP_TXCTLALL_5:
	case R200_EMIT_VAP_PVS_CNTL:
		/* These packets don't contain memory offsets */
		break;

	default:
		DRM_ERROR("Unknown state packet ID %d\n", id);
		return -EINVAL;
	}

	return 0;
}

/* Validate a type-3 CP packet from a client command buffer.
 *
 * Checks the packet header, bounds the packet against the buffer the
 * client supplied, whitelists the opcode, and validates/fixes up any GPU
 * memory offsets the packet carries (via radeon_check_and_fixup_offset).
 * On success *cmdsz is set to the packet size in dwords, header included.
 *
 * Returns 0 on success, -EINVAL on any malformed or disallowed packet.
 */
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
						     dev_priv,
						     struct drm_file *file_priv,
						     drm_radeon_kcmd_buffer_t *
						     cmdbuf,
						     unsigned int *cmdsz)
{
	u32 *cmd = (u32 *) cmdbuf->buf;
	u32 offset, narrays;
	int count, i, k;

	/* size = header dword + count field (dwords) + 1 */
	*cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);

	if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
		DRM_ERROR("Not a type 3 packet\n");
		return -EINVAL;
	}

	/* the declared packet size must fit in the data actually supplied */
	if (4 * *cmdsz > cmdbuf->bufsz) {
		DRM_ERROR("Packet size larger than size of data provided\n");
		return -EINVAL;
	}

	switch(cmd[0] & 0xff00) {
	/* XXX Are there old drivers needing other packets? */

	case RADEON_3D_DRAW_IMMD:
	case RADEON_3D_DRAW_VBUF:
	case RADEON_3D_DRAW_INDX:
	case RADEON_WAIT_FOR_IDLE:
	case RADEON_CP_NOP:
	case RADEON_3D_CLEAR_ZMASK:
/*	case RADEON_CP_NEXT_CHAR:
	case RADEON_CP_PLY_NEXTSCAN:
	case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
		/* these packets are safe */
		break;

	case RADEON_CP_3D_DRAW_IMMD_2:
	case RADEON_CP_3D_DRAW_VBUF_2:
	case RADEON_CP_3D_DRAW_INDX_2:
	case RADEON_3D_CLEAR_HIZ:
		/* safe but r200 only */
		if (dev_priv->microcode_version != UCODE_R200) {
			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
			return -EINVAL;
		}
		break;

	case RADEON_3D_LOAD_VBPNTR:
		count = (cmd[0] >> 16) & 0x3fff;

		if (count > 18) { /* 12 arrays max */
			DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
				  count);
			return -EINVAL;
		}

		/* carefully check packet contents */
		narrays = cmd[1] & ~0xc000;
		k = 0;
		i = 2;
		while ((k < narrays) && (i < (count + 2))) {
			i++;		/* skip attribute field */
			if (radeon_check_and_fixup_offset(dev_priv, file_priv,
							  &cmd[i])) {
				DRM_ERROR
				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
				     k, i);
				return -EINVAL;
			}
			k++;
			i++;
			if (k == narrays)
				break;
			/* have one more to process, they come in pairs */
			if (radeon_check_and_fixup_offset(dev_priv,
							  file_priv, &cmd[i]))
			{
				DRM_ERROR
				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
				     k, i);
				return -EINVAL;
			}
			k++;
			i++;
		}
		/* do the counts match what we expect ? */
		if ((k != narrays) || (i != (count + 2))) {
			/* Bug fix: the comparison above is against count + 2,
			 * but the message used to print count + 1 — report the
			 * value actually compared. */
			DRM_ERROR
			    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+2=%d).\n",
			      k, i, narrays, count + 2);
			return -EINVAL;
		}
		break;

	case RADEON_3D_RNDR_GEN_INDX_PRIM:
		if (dev_priv->microcode_version != UCODE_R100) {
			DRM_ERROR("Invalid 3d packet for r200-class chip\n");
			return -EINVAL;
		}
		if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
				DRM_ERROR("Invalid rndr_gen_indx offset\n");
				return -EINVAL;
		}
		break;

	case RADEON_CP_INDX_BUFFER:
		if (dev_priv->microcode_version != UCODE_R200) {
			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
			return -EINVAL;
		}
		/* dword 1 must address the expected register */
		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
			return -EINVAL;
		}
		if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
			DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
			return -EINVAL;
		}
		break;

	case RADEON_CNTL_HOSTDATA_BLT:
	case RADEON_CNTL_PAINT_MULTI:
	case RADEON_CNTL_BITBLT_MULTI:
		/* MSB of opcode: next DWORD GUI_CNTL */
		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			/* pitch/offset dword: offset lives in the low 22 bits,
			 * in units of 1024 bytes — hence the << 10 / >> 10. */
			offset = cmd[2] << 10;
			if (radeon_check_and_fixup_offset
			    (dev_priv, file_priv, &offset)) {
				DRM_ERROR("Invalid first packet offset\n");
				return -EINVAL;
			}
			cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
		}

		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[3] << 10;
			if (radeon_check_and_fixup_offset
			    (dev_priv, file_priv, &offset)) {
				DRM_ERROR("Invalid second packet offset\n");
				return -EINVAL;
			}
			cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
		}
		break;

	default:
		DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
		return -EINVAL;
	}

	return 0;
}

/* ================================================================
 * CP hardware state programming functions
 */

/* Program the hardware clip rectangle (RE_TOP_LEFT / RE_WIDTH_HEIGHT)
 * from a drm_clip_rect.  The -1 on x2/y2 suggests the box uses exclusive
 * bottom-right coordinates while the register takes inclusive ones —
 * NOTE(review): confirm against the register documentation. */
static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
					     struct drm_clip_rect * box)
{
	RING_LOCALS;

	DRM_DEBUG("   box:  x1=%d y1=%d  x2=%d y2=%d\n",
		  box->x1, box->y1, box->x2, box->y2);

	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
	OUT_RING((box->y1 << 16) | box->x1);
	OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
	OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
	ADVANCE_RING();
}

/* Emit 1.1 state
 */
/* Validate the client-supplied buffer/texture offsets, then emit the
 * dirty portions of the context and texture state to the CP ring.
 * Returns 0 on success or -EINVAL if any offset fails validation.
 */
static int radeon_emit_state(drm_radeon_private_t * dev_priv,
			     struct drm_file *file_priv,
			     drm_radeon_context_regs_t * ctx,
			     drm_radeon_texture_regs_t * tex,
			     unsigned int dirty)
{
	RING_LOCALS;
	DRM_DEBUG("dirty=0x%08x\n", dirty);

	if (dirty & RADEON_UPLOAD_CONTEXT) {
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &ctx->rb3d_depthoffset)) {
			DRM_ERROR("Invalid depth buffer offset\n");
			return -EINVAL;
		}

		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &ctx->rb3d_coloroffset)) {
			/* Fixed copy-paste error: this check is for the colour
			 * buffer, not the depth buffer (message now matches
			 * the RADEON_EMIT_PP_CNTL validation path). */
			DRM_ERROR("Invalid colour buffer offset\n");
			return -EINVAL;
		}

		BEGIN_RING(14);
		OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
		OUT_RING(ctx->pp_misc);
		OUT_RING(ctx->pp_fog_color);
		OUT_RING(ctx->re_solid_color);
		OUT_RING(ctx->rb3d_blendcntl);
		OUT_RING(ctx->rb3d_depthoffset);
		OUT_RING(ctx->rb3d_depthpitch);
		OUT_RING(ctx->rb3d_zstencilcntl);
		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
		OUT_RING(ctx->pp_cntl);
		OUT_RING(ctx->rb3d_cntl);
		OUT_RING(ctx->rb3d_coloroffset);
		OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
		OUT_RING(ctx->rb3d_colorpitch);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_VERTFMT) {
		BEGIN_RING(2);
		OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
		OUT_RING(ctx->se_coord_fmt);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_LINE) {
		BEGIN_RING(5);
		OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
		OUT_RING(ctx->re_line_pattern);
		OUT_RING(ctx->re_line_state);
		OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
		OUT_RING(ctx->se_line_width);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_BUMPMAP) {
		BEGIN_RING(5);
		OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
		OUT_RING(ctx->pp_lum_matrix);
		OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
		OUT_RING(ctx->pp_rot_matrix_0);
		OUT_RING(ctx->pp_rot_matrix_1);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_MASKS) {
		BEGIN_RING(4);
		OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
		OUT_RING(ctx->rb3d_stencilrefmask);
		OUT_RING(ctx->rb3d_ropcntl);
		OUT_RING(ctx->rb3d_planemask);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_VIEWPORT) {
		BEGIN_RING(7);
		OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
		OUT_RING(ctx->se_vport_xscale);
		OUT_RING(ctx->se_vport_xoffset);
		OUT_RING(ctx->se_vport_yscale);
		OUT_RING(ctx->se_vport_yoffset);
		OUT_RING(ctx->se_vport_zscale);
		OUT_RING(ctx->se_vport_zoffset);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_SETUP) {
		BEGIN_RING(4);
		OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
		OUT_RING(ctx->se_cntl);
		OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
		OUT_RING(ctx->se_cntl_status);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_MISC) {
		BEGIN_RING(2);
		OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
		OUT_RING(ctx->re_misc);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_TEX0) {
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &tex[0].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 0\n");
			return -EINVAL;
		}

		BEGIN_RING(9);
		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
		OUT_RING(tex[0].pp_txfilter);
		OUT_RING(tex[0].pp_txformat);
		OUT_RING(tex[0].pp_txoffset);
		OUT_RING(tex[0].pp_txcblend);
		OUT_RING(tex[0].pp_txablend);
		OUT_RING(tex[0].pp_tfactor);
		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
		OUT_RING(tex[0].pp_border_color);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_TEX1) {
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &tex[1].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 1\n");
			return -EINVAL;
		}

		BEGIN_RING(9);
		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
		OUT_RING(tex[1].pp_txfilter);
		OUT_RING(tex[1].pp_txformat);
		OUT_RING(tex[1].pp_txoffset);
		OUT_RING(tex[1].pp_txcblend);
		OUT_RING(tex[1].pp_txablend);
		OUT_RING(tex[1].pp_tfactor);
		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
		OUT_RING(tex[1].pp_border_color);
		ADVANCE_RING();
	}

	if (dirty & RADEON_UPLOAD_TEX2) {
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &tex[2].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 2\n");
			return -EINVAL;
		}

		BEGIN_RING(9);
		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
		OUT_RING(tex[2].pp_txfilter);
		OUT_RING(tex[2].pp_txformat);
		OUT_RING(tex[2].pp_txoffset);
		OUT_RING(tex[2].pp_txcblend);
		OUT_RING(tex[2].pp_txablend);
		OUT_RING(tex[2].pp_tfactor);
		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
		OUT_RING(tex[2].pp_border_color);
		ADVANCE_RING();
	}

	return 0;
}

/* Emit 1.2 state
 */
/* Wrapper around radeon_emit_state() that additionally handles the
 * context2 (zbias) registers introduced with the 1.2 state layout.
 * Returns whatever radeon_emit_state() returns (0 or -EINVAL). */
static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
			      struct drm_file *file_priv,
			      drm_radeon_state_t * state)
{
	RING_LOCALS;

	/* Emit the 1.2-only zbias pair first, if dirty... */
	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
		BEGIN_RING(3);
		OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
		OUT_RING(state->context2.se_zbias_factor);
		OUT_RING(state->context2.se_zbias_constant);
		ADVANCE_RING();
	}

	/* ...then delegate the remaining state to the 1.1 path. */
	return radeon_emit_state(dev_priv, file_priv, &state->context,
				 state->tex, state->dirty);
}

/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
 * 1.3 cmdbuffers allow all previous state to be updated as well as
 * the tcl scalar and vector areas.
 */
/* Table describing each state packet reachable from a 1.3 cmdbuffer:
 * the first register of the range, the number of data dwords written,
 * and a name used for error/debug output.  The table index identifies
 * the packet (see the numeric annotations below), so entries must not
 * be reordered or removed.
 */
static struct {
	int start;		/* first hardware register of the range */
	int len;		/* number of data dwords in the packet */
	const char *name;	/* human-readable name for diagnostics */
} packet[RADEON_MAX_STATE_PACKETS] = {
	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
		    "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
		    "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
	{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
};

/* ================================================================
 * Performance monitoring functions
 */

/* Paint a small solid-colour rectangle into the currently displayed
 * buffer.  Used by the performance-monitoring overlay; coordinates are
 * relative to the first cliprect in the SAREA.
 */
static void radeon_clear_box(drm_radeon_private_t * dev_priv,
			     int x, int y, int w, int h, int r, int g, int b)
{
	u32 color;
	RING_LOCALS;

	/* Offset into the first cliprect. */
	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	/* Pack the 8-bit channel values into the framebuffer format. */
	if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_RGB565)
		color = ((r & 0xf8) << 8) |
			((g & 0xfc) << 3) |
			((b & 0xf8) >> 3);
	else	/* RADEON_COLOR_FORMAT_ARGB8888 and anything else */
		color = (0xffU << 24) | (r << 16) | (g << 8) | b;

	/* Enable writes to every plane before painting. */
	BEGIN_RING(4);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
	OUT_RING(0xffffffff);
	ADVANCE_RING();

	BEGIN_RING(6);
	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		 RADEON_GMC_BRUSH_SOLID_COLOR |
		 (dev_priv->color_fmt << 8) |
		 RADEON_GMC_SRC_DATATYPE_COLOR |
		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);

	/* Select the destination surface according to the current
	 * page-flip state.
	 */
	OUT_RING(dev_priv->sarea_priv->pfCurrentPage == 1 ?
		 dev_priv->front_pitch_offset : dev_priv->back_pitch_offset);

	OUT_RING(color);
	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);
	ADVANCE_RING();
}

/* Draw the diagnostic overlay boxes that encode per-frame statistics,
 * then reset the statistics for the next frame.
 */
static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
{
	/* Collapse various things into a wait flag -- trying to guess
	 * if userspace slept -- better just to have them tell us.
	 */
	if (dev_priv->stats.last_frame_reads > 1 ||
	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears ||
	    dev_priv->stats.freelist_loops)
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	/* Purple: a page flip occurred this frame. */
	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
		radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);

	/* Red: we had to wait for idle at some point. */
	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
		radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);

	/* Blue box: lost context? (not implemented) */

	/* Yellow: a texture upload happened. */
	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
		radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);

	/* Green: the hardware never idled, as far as we can tell. */
	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
		radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);

	/* Bar whose length tracks the number of buffers allocated,
	 * clamped to 100 (not a great measure, easily confused).
	 */
	if (dev_priv->stats.requested_bufs) {
		if (dev_priv->stats.requested_bufs > 100)
			dev_priv->stats.requested_bufs = 100;

		radeon_clear_box(dev_priv, 4, 16,
				 dev_priv->stats.requested_bufs, 4,
				 196, 128, 128);
	}

	memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
}

/* ================================================================
 * CP command dispatch functions
 */

/* Clear the requested buffers (front/back colour, depth, stencil) for
 * every cliprect in the SAREA.  Colour buffers are cleared with 2D
 * solid fills; depth/stencil buffers are cleared either via the fast
 * hyper-z path or by rendering screen-aligned quads into just those
 * buffers.  Finishes by emitting the clear-age write that clients
 * throttle against.
 */
static void radeon_cp_dispatch_clear(struct drm_device * dev,
				     drm_radeon_clear_t * clear,
				     drm_radeon_clear_rect_t * depth_boxes)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
	int i;
	RING_LOCALS;
	DRM_DEBUG("flags = 0x%x\n", flags);

	dev_priv->stats.clears++;

	/* When page-flipped, "front" and "back" are swapped, so exchange
	 * the two flags before picking destination surfaces below.
	 */
	if (dev_priv->sarea_priv->pfCurrentPage == 1) {
		unsigned int tmp = flags;

		flags &= ~(RADEON_FRONT | RADEON_BACK);
		if (tmp & RADEON_FRONT)
			flags |= RADEON_BACK;
		if (tmp & RADEON_BACK)
			flags |= RADEON_FRONT;
	}

	if (flags & (RADEON_FRONT | RADEON_BACK)) {

		BEGIN_RING(4);

		/* Ensure the 3D stream is idle before doing a
		 * 2D fill to clear the front or back buffer.
		 */
		RADEON_WAIT_UNTIL_3D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
		OUT_RING(clear->color_mask);

		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		/* One solid fill per cliprect per requested buffer. */
		for (i = 0; i < nbox; i++) {
			int x = pbox[i].x1;
			int y = pbox[i].y1;
			int w = pbox[i].x2 - x;
			int h = pbox[i].y2 - y;

			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
				  x, y, w, h, flags);

			if (flags & RADEON_FRONT) {
				BEGIN_RING(6);

				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->
					  color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->front_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);

				ADVANCE_RING();
			}

			if (flags & RADEON_BACK) {
				BEGIN_RING(6);

				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->
					  color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->back_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);

				ADVANCE_RING();
			}
		}
	}

	/* hyper z clear */
	/* no docs available, based on reverse engineering by Stephane Marchesin */
	if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
	    && (flags & RADEON_CLEAR_FASTZ)) {

		int i;	/* NOTE(review): shadows the function-scope 'i' */
		/* z-mask entries cover depth pixels; pitch is in bytes, so
		 * divide by the bytes-per-pixel of the depth format.
		 */
		int depthpixperline =
		    dev_priv->depth_fmt ==
		    RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch /
						       2) : (dev_priv->
							     depth_pitch / 4);

		u32 clearmask;

		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
		    ((clear->depth_mask & 0xff) << 24);

		/* Make sure we restore the 3D state next time.
		 * we haven't touched any "normal" state - still need this?
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		if ((dev_priv->flags & RADEON_HAS_HIERZ)
		    && (flags & RADEON_USE_HIERZ)) {
			/* FIXME : reverse engineer that for Rx00 cards */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
			/* pattern seems to work for r100, though get slight
			   rendering errors with glxgears. If hierz is not enabled for r100,
			   only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
			   other ones are ignored, and the same clear mask can be used. That's
			   very different behaviour than R200 which needs different clear mask
			   and different number of tiles to clear if hierz is enabled or not !?!
			 */
			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
		} else {
			/* clear mask : chooses the clearing pattern.
			   rv250: could be used to clear only parts of macrotiles
			   (but that would get really complicated...)?
			   bit 0 and 1 (either or both of them ?!?!) are used to
			   not clear tile (or maybe one of the bits indicates if the tile is
			   compressed or not), bit 2 and 3 to not clear tile 1,...,.
			   Pattern is as follows:
			   | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
			   bits -------------------------------------------------
			   | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
			   covers 256 pixels ?!?
			 */
			clearmask = 0x0;
		}

		BEGIN_RING(8);
		RADEON_WAIT_UNTIL_2D_IDLE();
		OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
			     tempRB3D_DEPTHCLEARVALUE);
		/* what offset is this exactly ? */
		OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
		/* need ctlstat, otherwise get some strange black flickering */
		OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
			     RADEON_RB3D_ZC_FLUSH_ALL);
		ADVANCE_RING();

		/* Per-cliprect z-mask clear; tile geometry differs between
		 * r100-with-hierz, r200, and rv100 paths below.
		 */
		for (i = 0; i < nbox; i++) {
			int tileoffset, nrtilesx, nrtilesy, j;
			/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
			if ((dev_priv->flags & RADEON_HAS_HIERZ)
			    && !(dev_priv->microcode_version == UCODE_R200)) {
				/* FIXME : figure this out for r200 (when hierz is enabled). Or
				   maybe r200 actually doesn't need to put the low-res z value into
				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
				   Works for R100, both with hierz and without.
				   R100 seems to operate on 2x1 8x8 tiles, but...
				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
				   problematic with resolutions which are not 64 pix aligned? */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					OUT_RING(tileoffset * 8);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			} else if (dev_priv->microcode_version == UCODE_R200) {
				/* works for rv250. */
				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 5;
				nrtilesx =
				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					/* judging by the first tile offset needed, could possibly
					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
					   macro tiles, though would still need clear mask for
					   right/bottom if truly 4x4 granularity is desired ? */
					OUT_RING(tileoffset * 16);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 1);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 5;
				}
			} else {	/* rv 100 */
				/* rv100 might not need 64 pix alignment, who knows */
				/* offsets are, hmm, weird */
				tileoffset =
				    ((pbox[i].y1 >> 4) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					OUT_RING(tileoffset * 128);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			}
		}

		/* TODO don't always clear all hi-level z tiles */
		if ((dev_priv->flags & RADEON_HAS_HIERZ)
		    && (dev_priv->microcode_version == UCODE_R200)
		    && (flags & RADEON_USE_HIERZ))
			/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
		{
			BEGIN_RING(4);
			OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
			OUT_RING(0x0);	/* First tile */
			OUT_RING(0x3cc0);
			OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
			ADVANCE_RING();
		}
	}

	/* We have to clear the depth and/or stencil buffers by
	 * rendering a quad into just those buffers.  Thus, we have to
	 * make sure the 3D engine is configured correctly.
	 */
	else if ((dev_priv->microcode_version == UCODE_R200) &&
		(flags & (RADEON_DEPTH | RADEON_STENCIL))) {

		int tempPP_CNTL;
		int tempRE_CNTL;
		int tempRB3D_CNTL;
		int tempRB3D_ZSTENCILCNTL;
		int tempRB3D_STENCILREFMASK;
		int tempRB3D_PLANEMASK;
		int tempSE_CNTL;
		int tempSE_VTE_CNTL;
		int tempSE_VTX_FMT_0;
		int tempSE_VTX_FMT_1;
		int tempSE_VAP_CNTL;
		int tempRE_AUX_SCISSOR_CNTL;

		tempPP_CNTL = 0;
		tempRE_CNTL = 0;

		tempRB3D_CNTL = depth_clear->rb3d_cntl;

		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
		tempRB3D_STENCILREFMASK = 0x0;

		tempSE_CNTL = depth_clear->se_cntl;

		/* Disable TCL */

		tempSE_VAP_CNTL = (	/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK |  */
					  (0x9 <<
					   SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));

		tempRB3D_PLANEMASK = 0x0;

		tempRE_AUX_SCISSOR_CNTL = 0x0;

		tempSE_VTE_CNTL =
		    SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;

		/* Vertex format (X, Y, Z, W) */
		tempSE_VTX_FMT_0 =
		    SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
		    SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
		tempSE_VTX_FMT_1 = 0x0;

		/*
		 * Depth buffer specific enables
		 */
		if (flags & RADEON_DEPTH) {
			/* Enable depth buffer */
			tempRB3D_CNTL |= RADEON_Z_ENABLE;
		} else {
			/* Disable depth buffer */
			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
		}

		/*
		 * Stencil buffer specific enables
		 */
		if (flags & RADEON_STENCIL) {
			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = clear->depth_mask;
		} else {
			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING(26);
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
		OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
		OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
			     tempRB3D_STENCILREFMASK);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
		OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
		OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
		OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
		OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
		OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
		OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		/* One immediate-mode quad (3-vertex rect list) per cliprect;
		 * 0x3f800000 is 1.0f for the W component.
		 */
		for (i = 0; i < nbox; i++) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			BEGIN_RING(14);
			OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			ADVANCE_RING();
		}
	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
		/* Pre-R200 quad-based depth/stencil clear. */

		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;

		rb3d_cntl = depth_clear->rb3d_cntl;

		if (flags & RADEON_DEPTH) {
			rb3d_cntl |= RADEON_Z_ENABLE;
		} else {
			rb3d_cntl &= ~RADEON_Z_ENABLE;
		}

		if (flags & RADEON_STENCIL) {
			rb3d_cntl |= RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = clear->depth_mask;	/* misnamed field */
		} else {
			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING(13);
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
		OUT_RING(0x00000000);
		OUT_RING(rb3d_cntl);

		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
		OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for (i = 0; i < nbox; i++) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			BEGIN_RING(15);

			OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
			OUT_RING(RADEON_VTX_Z_PRESENT |
				 RADEON_VTX_PKCOLOR_PRESENT);
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  RADEON_MAOS_ENABLE |
				  RADEON_VTX_FMT_RADEON_MODE |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			ADVANCE_RING();
		}
	}

	/* Increment the clear counter.  The client-side 3D driver must
	 * wait on this value before performing the clear ioctl.  We
	 * need this because the card's so damned fast...
	 */
	dev_priv->sarea_priv->last_clear++;

	BEGIN_RING(4);

	RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
}

/* Swap buffers by blitting the back buffer to the front, one blit per
 * cliprect, honouring the current page-flip state.  Finishes by
 * emitting the frame-age write that clients throttle against.
 */
static void radeon_cp_dispatch_swap(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes)
		radeon_cp_performance_boxes(dev_priv);

	/* Wait for the 3D stream to idle before dispatching the bitblt.
	 * This will prevent data corruption between the two streams.
	 */
	BEGIN_RING(2);

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);

		BEGIN_RING(9);

		/* Straight copy blit (ROP3_S) from back to front. */
		OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
		if (dev_priv->sarea_priv->pfCurrentPage == 0) {
			OUT_RING(dev_priv->back_pitch_offset);
			OUT_RING(dev_priv->front_pitch_offset);
		} else {
			OUT_RING(dev_priv->front_pitch_offset);
			OUT_RING(dev_priv->back_pitch_offset);
		}

		/* Same coordinates for source and destination. */
		OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING(4);

	RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
	RADEON_WAIT_UNTIL_2D_IDLE();

	ADVANCE_RING();
}

/* Page flip: point both CRTCs at the buffer that is about to become
 * visible, then toggle pfCurrentPage and emit the frame-age write.
 */
static void radeon_cp_dispatch_flip(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
	/* On page 1 the front buffer memory is next to be displayed;
	 * otherwise the back buffer memory is.
	 */
	int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
	    ? dev_priv->front_offset : dev_priv->back_offset;
	RING_LOCALS;
	DRM_DEBUG("pfCurrentPage=%d\n",
		  dev_priv->sarea_priv->pfCurrentPage);

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes) {
		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
		radeon_cp_performance_boxes(dev_priv);
	}

	/* Update the frame offsets for both CRTCs
	 */
	BEGIN_RING(6);

	RADEON_WAIT_UNTIL_3D_IDLE();
	/* NOTE(review): (color_fmt - 2) appears to stand in for
	 * bytes-per-pixel here -- confirm against the format enum.
	 */
	OUT_RING_REG(RADEON_CRTC_OFFSET,
		     ((sarea->frame.y * dev_priv->front_pitch +
		       sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
		     + offset);
	OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
		     + offset);

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage =
		1 - dev_priv->sarea_priv->pfCurrentPage;

	BEGIN_RING(2);

	RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

/* Validate a vertex count against a hardware primitive type.
 * Returns nonzero when @nr vertices cannot legally describe whole
 * primitives of that type (too few, wrong multiple, or unknown type).
 */
static int bad_prim_vertex_nr(int primitive, int nr)
{
	int type = primitive & RADEON_PRIM_TYPE_MASK;

	if (type == RADEON_PRIM_TYPE_NONE || type == RADEON_PRIM_TYPE_POINT)
		return nr < 1;

	if (type == RADEON_PRIM_TYPE_LINE)
		return (nr & 1) || nr == 0;

	if (type == RADEON_PRIM_TYPE_LINE_STRIP)
		return nr < 2;

	if (type == RADEON_PRIM_TYPE_TRI_LIST ||
	    type == RADEON_PRIM_TYPE_3VRT_POINT_LIST ||
	    type == RADEON_PRIM_TYPE_3VRT_LINE_LIST ||
	    type == RADEON_PRIM_TYPE_RECT_LIST)
		return nr % 3 || nr == 0;

	if (type == RADEON_PRIM_TYPE_TRI_FAN ||
	    type == RADEON_PRIM_TYPE_TRI_STRIP)
		return nr < 3;

	/* Unrecognized primitive type: always reject. */
	return 1;
}

/* Description of one TCL primitive batch dispatched from a buffer. */
typedef struct {
	unsigned int start;	/* byte offset of first datum in the buffer */
	unsigned int finish;	/* byte offset just past the last datum */
	unsigned int prim;	/* hardware primitive type / walk flags */
	unsigned int numverts;	/* number of vertices emitted */
	unsigned int offset;	/* vertex array offset (indexed prims only) */
	unsigned int vc_format;	/* vertex format word written to hardware */
} drm_radeon_tcl_prim_t;

/* Emit list-walk vertex-buffer rendering commands for @prim, replaying
 * the draw once per cliprect.  Note the do/while: the draw is emitted
 * at least once even when there are zero cliprects.
 */
static void radeon_cp_dispatch_vertex(struct drm_device * dev,
				      struct drm_buf * buf,
				      drm_radeon_tcl_prim_t * prim)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	/* GART address of the first vertex in this buffer. */
	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
	int numverts = (int)prim->numverts;
	int nbox = sarea_priv->nbox;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
		  prim->prim,
		  prim->vc_format, prim->start, prim->finish, prim->numverts);

	/* Refuse counts that cannot form whole primitives. */
	if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
		DRM_ERROR("bad prim %x numverts %d\n",
			  prim->prim, prim->numverts);
		return;
	}

	do {
		/* Emit the next cliprect */
		if (i < nbox) {
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
		}

		/* Emit the vertex buffer rendering commands */
		BEGIN_RING(5);

		OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
		OUT_RING(offset);
		OUT_RING(numverts);
		OUT_RING(prim->vc_format);
		OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
			 RADEON_COLOR_ORDER_RGBA |
			 RADEON_VTX_FMT_RADEON_MODE |
			 (numverts << RADEON_NUM_VERTICES_SHIFT));

		ADVANCE_RING();

		i++;
	} while (i < nbox);
}

/* Mark a DMA buffer as pending and stamp it with a new dispatch age so
 * the freelist can reclaim it once the CP has processed past it.
 */
static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;

	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;

	/* Emit the vertex buffer age */
	BEGIN_RING(2);
	RADEON_DISPATCH_AGE(buf_priv->age);
	ADVANCE_RING();

	buf->pending = 1;
	buf->used = 0;
}

/* Fire the [start, end) byte range of @buf as an indirect buffer.
 * Emits nothing when the range is empty.  Pads odd-length data to an
 * even dword count in place (the CP requires even-sized IBs).
 */
static void radeon_cp_dispatch_indirect(struct drm_device * dev,
					struct drm_buf * buf, int start, int end)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		/* GART address of the range and its length rounded up
		 * to whole dwords.
		 */
		int offset = (dev_priv->gart_buffers_offset
			      + buf->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CP packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = RADEON_CP_PACKET2;
		}

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}
}

/* Dispatch an indexed primitive.  The draw command header is written
 * directly into the element buffer ahead of the index data, then the
 * whole range is fired as an indirect buffer once per cliprect (at
 * least once, via the do/while).  No RING_LOCALS here: all ring
 * emission happens inside radeon_cp_dispatch_indirect().
 */
static void radeon_cp_dispatch_indices(struct drm_device * dev,
				       struct drm_buf * elt_buf,
				       drm_radeon_tcl_prim_t * prim)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + prim->offset;
	u32 *data;
	int dwords;
	int i = 0;
	/* Indices start after the command header we write below. */
	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
	int count = (prim->finish - start) / sizeof(u16);
	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start, prim->finish, prim->offset, prim->numverts);

	if (bad_prim_vertex_nr(prim->prim, count)) {
		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
		return;
	}

	/* Reject empty ranges and misaligned starts (header must be
	 * 8-byte aligned).
	 */
	if (start >= prim->finish || (prim->start & 0x7)) {
		DRM_ERROR("buffer prim %d\n", prim->prim);
		return;
	}

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	/* Write the draw header in front of the index data. */
	data = (u32 *) ((char *)dev->agp_buffer_map->handle +
			elt_buf->offset + prim->start);

	data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
	data[1] = offset;
	data[2] = prim->numverts;
	data[3] = prim->vc_format;
	data[4] = (prim->prim |
		   RADEON_PRIM_WALK_IND |
		   RADEON_COLOR_ORDER_RGBA |
		   RADEON_VTX_FMT_RADEON_MODE |
		   (count << RADEON_NUM_VERTICES_SHIFT));

	do {
		if (i < nbox)
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

		radeon_cp_dispatch_indirect(dev, elt_buf,
					    prim->start, prim->finish);

		i++;
	} while (i < nbox);

}

#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE

/* Upload a texture image supplied by userspace.
 *
 * The image data is copied from user space into a DMA buffer (with
 * special handling for micro-tiled layouts and textures narrower than
 * the minimum blit pitch) and then blitted to tex->offset in video
 * memory.  Images larger than one DMA buffer are uploaded in multiple
 * passes; *image is updated after each pass so that userspace can
 * resubmit the remainder if we have to return -EAGAIN.
 */
static int radeon_cp_dispatch_texture(struct drm_device * dev,
				      struct drm_file *file_priv,
				      drm_radeon_texture_t * tex,
				      drm_radeon_tex_image_t * image)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_buf *buf;
	u32 format;
	u32 *buffer;
	const u8 __user *data;
	int size, dwords, tex_width, blit_width, spitch;
	u32 height;
	int i;
	u32 texpitch, microtile;
	u32 offset, byte_offset;
	RING_LOCALS;

	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
		DRM_ERROR("Invalid destination offset\n");
		return -EINVAL;
	}

	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	/* Derive the blit color format and the row widths in bytes from
	 * the texel format. */
	switch (tex->format) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR("invalid texture format %d\n", tex->format);
		return -EINVAL;
	}
	/* Source pitch for the blit, in 64-byte units. */
	spitch = blit_width >> 6;
	if (spitch == 0 && image->height > 1)
		return -EINVAL;

	texpitch = tex->pitch;
	/* The pitch value carries the micro-tile flag (shifted down by 22
	 * relative to RADEON_DST_TILE_MICRO). */
	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
		microtile = 1;
		if (tex_width < 64) {
			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
			/* we got tiled coordinates, untile them */
			image->x *= 2;
		}
	} else
		microtile = 0;

	/* this might fail for zero-sized uploads - are those illegal? */
	if (!radeon_check_offset(dev_priv, tex->offset + image->height *
				blit_width - 1)) {
		DRM_ERROR("Invalid final destination offset\n");
		return -EINVAL;
	}

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);

	do {
		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
			  tex->offset >> 10, tex->pitch, tex->format,
			  image->x, image->y, image->width, image->height);

		/* Make a copy of some parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		height = image->height;
		data = (const u8 __user *)image->data;

		size = height * blit_width;

		if (size > RADEON_MAX_TEXTURE_SIZE) {
			/* Too big for one DMA buffer: clip this pass to as
			 * many whole rows as fit. */
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if (size < 4 && size > 0) {
			size = 4;
		} else if (size == 0) {
			return 0;
		}

		buf = radeon_freelist_get(dev);
		/* NOTE: the retry-after-idle path is deliberately disabled
		 * ("0 &&"); the -EAGAIN return below lets userspace retry
		 * instead. */
		if (0 && !buf) {
			radeon_do_cp_idle(dev_priv);
			buf = radeon_freelist_get(dev);
		}
		if (!buf) {
			DRM_DEBUG("EAGAIN\n");
			/* Hand the updated image state back so the caller
			 * can resubmit from where we stopped. */
			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
				return -EFAULT;
			return -EAGAIN;
		}

		/* Dispatch the indirect buffer.
		 */
		buffer =
		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
		dwords = size / 4;

/* Copy a chunk of user data into the DMA buffer, failing the ioctl on
 * a faulting user pointer. */
#define RADEON_COPY_MT(_buf, _data, _width) \
	do { \
		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
			return -EFAULT; \
		} \
	} while(0)

		if (microtile) {
			/* texture micro tiling in use, minimum texture width is thus 16 bytes.
			   however, we cannot use blitter directly for texture width < 64 bytes,
			   since minimum tex pitch is 64 bytes and we need this to match
			   the texture width, otherwise the blitter will tile it wrong.
			   Thus, tiling manually in this case. Additionally, need to special
			   case tex height = 1, since our actual image will have height 2
			   and we need to ensure we don't read beyond the texture size
			   from user space. */
			if (tex->height == 1) {
				if (tex_width >= 64 || tex_width <= 16) {
					RADEON_COPY_MT(buffer, data,
						(int)(tex_width * sizeof(u32)));
				} else if (tex_width == 32) {
					RADEON_COPY_MT(buffer, data, 16);
					RADEON_COPY_MT(buffer + 8,
						       data + 16, 16);
				}
			} else if (tex_width >= 64 || tex_width == 16) {
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else if (tex_width < 16) {
				/* Narrow rows: one row per 16-byte tile line
				 * (buffer advances 4 dwords = 16 bytes). */
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 4;
					data += tex_width;
				}
			} else if (tex_width == 32) {
				/* TODO: make sure this works when not fitting in one buffer
				   (i.e. 32bytes x 2048...) */
				/* Interleave two source rows into the tiled
				 * destination layout, 16 bytes at a time. */
				for (i = 0; i < tex->height; i += 2) {
					RADEON_COPY_MT(buffer, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 8, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 4, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 12, data, 16);
					data += 16;
					buffer += 16;
				}
			}
		} else {
			if (tex_width >= 32) {
				/* Texture image width is larger than the minimum, so we
				 * can upload it directly.
				 */
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else {
				/* Texture image width is less than the minimum, so we
				 * need to pad out each image scanline to the minimum
				 * width.
				 */
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 8;
					data += tex_width;
				}
			}
		}

#undef RADEON_COPY_MT
		/* Destination offset contribution of whole 2048-line bands;
		 * the remainder goes into the blit's Y coordinate below. */
		byte_offset = (image->y & ~2047) * blit_width;
		buf->file_priv = file_priv;
		buf->used = size;
		offset = dev_priv->gart_buffers_offset + buf->offset;
		/* Emit the host-data blit: src/dst pitch+offset, then the
		 * rectangle to copy. */
		BEGIN_RING(9);
		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (format << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
		OUT_RING((spitch << 22) | (offset >> 10));
		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
		OUT_RING(0);
		OUT_RING((image->x << 16) | (image->y % 2048));
		OUT_RING((image->width << 16) | height);
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		COMMIT_RING();

		radeon_cp_discard_buffer(dev, buf);

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data = (const u8 __user *)image->data + size;
	} while (image->height > 0);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();
	COMMIT_RING();

	return 0;
}

/* Upload a 32x32 polygon stipple pattern (32 dwords) into the RE
 * stipple RAM via the ring. */
static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* 2 dwords to reset the write address + 1 packet header + 32 data */
	BEGIN_RING(35);

	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
	OUT_RING(0x00000000);

	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
	for (i = 0; i < 32; i++) {
		OUT_RING(stipple[i]);
	}

	ADVANCE_RING();
}

/* Push the cached state of one surface register set out to the chip.
 * Idles the CP first, since the surface registers must not change while
 * the engine is processing commands.  No-op before the MMIO map exists. */
static void radeon_apply_surface_regs(int surf_index,
				      drm_radeon_private_t *dev_priv)
{
	u32 reg_offset = 16 * surf_index;

	if (!dev_priv->mmio)
		return;

	radeon_do_cp_idle(dev_priv);

	RADEON_WRITE(RADEON_SURFACE0_INFO + reg_offset,
		     dev_priv->surfaces[surf_index].flags);
	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + reg_offset,
		     dev_priv->surfaces[surf_index].lower);
	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + reg_offset,
		     dev_priv->surfaces[surf_index].upper);
}

/* Allocates a virtual surface
 * doesn't always allocate a real surface, will stretch an existing
 * surface when possible.
 *
 * Note that refcount can be at most 2, since during a free refcount=3
 * might mean we have to allocate a new surface which might not always
 * be available.
 * For example : we allocate three contigous surfaces ABC. If B is
 * freed, we suddenly need two surfaces to store A and C, which might
 * not always be available.
 */
static int alloc_surface(drm_radeon_surface_alloc_t *new,
			 drm_radeon_private_t *dev_priv,
			 struct drm_file *file_priv)
{
	struct radeon_virt_surface *s;
	int i;
	int virt_surface_index;
	uint32_t new_upper, new_lower;

	new_lower = new->address;
	new_upper = new_lower + new->size - 1;

	/* sanity check */
	/* Reject empty/flagless requests and ranges whose bounds are not
	 * aligned to the hardware's fixed address granularity. */
	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
	     RADEON_SURF_ADDRESS_FIXED_MASK)
	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
		return -1;

	/* make sure there is no overlap with existing surfaces */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if ((dev_priv->surfaces[i].refcount != 0) &&
		    (((new_lower >= dev_priv->surfaces[i].lower) &&
		      (new_lower < dev_priv->surfaces[i].upper)) ||
		     ((new_lower < dev_priv->surfaces[i].lower) &&
		      (new_upper > dev_priv->surfaces[i].lower)))) {
			return -1;
		}
	}

	/* find a virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
		if (dev_priv->virt_surfaces[i].file_priv == 0)
			break;
	if (i == 2 * RADEON_MAX_SURFACES) {
		return -1;
	}
	virt_surface_index = i;

	/* try to reuse an existing surface */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		/* extend before */
		/* New range ends exactly where surface i starts and the
		 * flags match: grow surface i downward instead of using a
		 * new real surface. */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].lower = s->lower;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}

		/* extend after */
		/* Symmetric case: new range starts right past surface i's
		 * end, so grow it upward. */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].upper = s->upper;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* okay, we need a new one */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if (dev_priv->surfaces[i].refcount == 0) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount = 1;
			dev_priv->surfaces[i].lower = s->lower;
			dev_priv->surfaces[i].upper = s->upper;
			dev_priv->surfaces[i].flags = s->flags;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* we didn't find anything */
	return -1;
}

/* Release the virtual surface owned by 'file_priv' that starts at
 * 'lower'.  Shrinks (or frees) the backing real surface accordingly and
 * re-programs the hardware registers.  Returns 0 on success, 1 if no
 * matching virtual surface was found. */
static int free_surface(struct drm_file *file_priv,
			drm_radeon_private_t * dev_priv,
			int lower)
{
	struct radeon_virt_surface *s;
	int i;

	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		s = &(dev_priv->virt_surfaces[i]);

		/* Skip free slots and entries that don't match. */
		if (!s->file_priv)
			continue;
		if (lower != s->lower || file_priv != s->file_priv)
			continue;

		/* Pull the real surface's bounds in off the freed range
		 * (only the side(s) this virtual surface actually owned). */
		if (dev_priv->surfaces[s->surface_index].lower == s->lower)
			dev_priv->surfaces[s->surface_index].lower = s->upper;

		if (dev_priv->surfaces[s->surface_index].upper == s->upper)
			dev_priv->surfaces[s->surface_index].upper = s->lower;

		dev_priv->surfaces[s->surface_index].refcount--;
		if (dev_priv->surfaces[s->surface_index].refcount == 0)
			dev_priv->surfaces[s->surface_index].flags = 0;

		s->file_priv = NULL;
		radeon_apply_surface_regs(s->surface_index, dev_priv);
		return 0;
	}
	return 1;
}

/* Free every virtual surface still owned by a departing client. */
static void radeon_surfaces_release(struct drm_file *file_priv,
				    drm_radeon_private_t * dev_priv)
{
	int idx;

	for (idx = 0; idx < 2 * RADEON_MAX_SURFACES; idx++) {
		if (dev_priv->virt_surfaces[idx].file_priv != file_priv)
			continue;
		free_surface(file_priv, dev_priv,
			     dev_priv->virt_surfaces[idx].lower);
	}
}

/* ================================================================
 * IOCTL functions
 */
static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_surface_alloc_t *alloc = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (alloc_surface(alloc, dev_priv, file_priv) == -1)
		return -EINVAL;
	else
		return 0;
}

static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_surface_free_t *memfree = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (free_surface(file_priv, dev_priv, memfree->address))
		return -EINVAL;
	else
		return 0;
}

/* IOCTL: clear color/depth/stencil for the cliprects currently in the
 * SAREA.  Userspace supplies one depth clear rect per cliprect, copied
 * in here and handed to the dispatcher. */
static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t *clear = data;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* Clamp the (client-writable) box count before using it as a
	 * copy length. */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
			       sarea_priv->nbox * sizeof(depth_boxes[0])))
		return -EFAULT;

	radeon_cp_dispatch_clear(dev, clear, depth_boxes);

	COMMIT_RING();
	return 0;
}

/* Not sure why this isn't set all the time:
 */
static int radeon_do_init_pageflip(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("\n");

	/* Enable the CRTC offset-flip control on both CRTCs so page
	 * flips take effect via the CRTC offset registers. */
	BEGIN_RING(6);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	ADVANCE_RING();

	dev_priv->page_flipping = 1;

	/* Normalize the SAREA's current-page field to a valid value. */
	if (dev_priv->sarea_priv->pfCurrentPage != 1)
		dev_priv->sarea_priv->pfCurrentPage = 0;

	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
/* IOCTL: flip the display to the other page, lazily enabling page
 * flipping on first use. */
static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip(dev);

	radeon_cp_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}

/* IOCTL: blit the back buffer to the front for each SAREA cliprect. */
static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* Clamp the client-writable box count before dispatch uses it. */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_swap(dev);
	/* Force a full state re-emit by the next 3D client. */
	dev_priv->sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}

/* IOCTL: render a vertex buffer.  Validates the buffer index, ownership
 * and primitive type, re-emits dirty state if needed, then dispatches
 * the buffer as a vertex primitive. */
static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex_t *vertex = data;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	sarea_priv = dev_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	/* Only the client that owns the buffer may submit it. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	/* Build up a prim_t record:
	 */
	if (vertex->count) {
		buf->used = vertex->count;	/* not used? */

		/* Flush any dirty state (other than cliprects) to the ring
		 * before rendering. */
		if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
			if (radeon_emit_state(dev_priv, file_priv,
					      &sarea_priv->context_state,
					      sarea_priv->tex_state,
					      sarea_priv->dirty)) {
				DRM_ERROR("radeon_emit_state failed\n");
				return -EINVAL;
			}

			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
					       RADEON_UPLOAD_TEX1IMAGES |
					       RADEON_UPLOAD_TEX2IMAGES |
					       RADEON_REQUIRE_QUIESCENCE);
		}

		prim.start = 0;
		prim.finish = vertex->count;	/* unused */
		prim.prim = vertex->prim;
		prim.numverts = vertex->count;
		prim.vc_format = dev_priv->sarea_priv->vc_format;

		radeon_cp_dispatch_vertex(dev, buf, &prim);
	}

	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}

/* IOCTL: render an indexed primitive from a DMA buffer.  Validates the
 * buffer and index range, re-emits dirty state, then dispatches the
 * indices via radeon_cp_dispatch_indices(). */
static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_indices_t *elts = data;
	drm_radeon_tcl_prim_t prim;
	int count;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
	sarea_priv = dev_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
		  DRM_CURRENTPID, elts->idx, elts->start, elts->end,
		  elts->discard);

	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", elts->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts->idx);
		return -EINVAL;
	}

	count = (elts->end - elts->start) / sizeof(u16);
	/* Rewind start to make room for the packet header that the
	 * dispatcher writes in front of the indices. */
	elts->start -= RADEON_INDEX_PRIM_OFFSET;

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
		return -EINVAL;
	}
	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
		return -EINVAL;
	}

	buf->used = elts->end;

	/* Flush dirty state (other than cliprects) before rendering. */
	if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
		if (radeon_emit_state(dev_priv, file_priv,
				      &sarea_priv->context_state,
				      sarea_priv->tex_state,
				      sarea_priv->dirty)) {
			DRM_ERROR("radeon_emit_state failed\n");
			return -EINVAL;
		}

		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
				       RADEON_UPLOAD_TEX1IMAGES |
				       RADEON_UPLOAD_TEX2IMAGES |
				       RADEON_REQUIRE_QUIESCENCE);
	}

	/* Build up a prim_t record:
	 */
	prim.start = elts->start;
	prim.finish = elts->end;
	prim.prim = elts->prim;
	prim.offset = 0;	/* offset from start of dma buffers */
	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
	prim.vc_format = dev_priv->sarea_priv->vc_format;

	radeon_cp_dispatch_indices(dev, buf, &prim);
	if (elts->discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}

/* IOCTL: upload a texture image by blitting it through a DMA buffer.
 * The user's image descriptor is copied in and handed to the dispatcher,
 * which updates it for multi-pass uploads. */
static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_texture_t *tex = data;
	drm_radeon_tex_image_t image;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (tex->image == NULL) {
		DRM_ERROR("null texture image!\n");
		return -EINVAL;
	}

	if (DRM_COPY_FROM_USER(&image,
			       (drm_radeon_tex_image_t __user *) tex->image,
			       sizeof(image)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	return radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
}

/* IOCTL: set the 32x32 polygon stipple pattern. */
static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t *stipple = data;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Copy the full pattern into kernel space before touching the ring. */
	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	radeon_cp_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}

/* IOCTL: submit a raw indirect buffer (privileged/X-server path; the
 * contents are not verified). */
static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_indirect_t *indirect = data;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	/* A buffer region may only be submitted once. */
	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
	if (indirect->discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}

/* IOCTL: newer vertex path — a buffer carrying several primitives, each
 * with its own (indexed) state block, all copied in from user space. */
static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex2_t *vertex = data;
	int i;
	unsigned char laststate;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	sarea_priv = dev_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}

	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		return -EINVAL;

	/* laststate starts at 0xff so the first prim always loads state. */
	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
		drm_radeon_prim_t prim;
		drm_radeon_tcl_prim_t tclprim;

		if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
			return -EFAULT;

		/* Only (re)emit state when the prim references a different
		 * state block than the previous one. */
		if (prim.stateidx != laststate) {
			drm_radeon_state_t state;

			if (DRM_COPY_FROM_USER(&state,
					       &vertex->state[prim.stateidx],
					       sizeof(state)))
				return -EFAULT;

			if (radeon_emit_state2(dev_priv, file_priv, &state)) {
				DRM_ERROR("radeon_emit_state2 failed\n");
				return -EINVAL;
			}

			laststate = prim.stateidx;
		}

		tclprim.start = prim.start;
		tclprim.finish = prim.finish;
		tclprim.prim = prim.prim;
		tclprim.vc_format = prim.vc_format;

		if (prim.prim & RADEON_PRIM_WALK_IND) {
			tclprim.offset = prim.numverts * 64;
			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */

			radeon_cp_dispatch_indices(dev, buf, &tclprim);
		} else {
			tclprim.numverts = prim.numverts;
			tclprim.offset = 0;	/* not used */

			radeon_cp_dispatch_vertex(dev, buf, &tclprim);
		}

		if (sarea_priv->nbox == 1)
			sarea_priv->nbox = 0;
	}

	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}

/* Emit one register-write packet from a client command buffer.  The
 * packet id indexes the driver's table of known register ranges; the
 * payload is verified/fixed up before being copied to the ring.
 * Advances cmdbuf past the consumed data. */
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
			       struct drm_file *file_priv,
			       drm_radeon_cmd_header_t header,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int id = (int)header.packet.packet_id;
	int sz, reg;
	int *data = (int *)cmdbuf->buf;
	RING_LOCALS;

	if (id >= RADEON_MAX_STATE_PACKETS)
		return -EINVAL;

	sz = packet[id].len;
	reg = packet[id].start;

	/* Don't read past the (kernel-copied) command buffer. */
	if (sz * sizeof(int) > cmdbuf->bufsz) {
		DRM_ERROR("Packet size provided larger than data provided\n");
		return -EINVAL;
	}

	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
		DRM_ERROR("Packet verification failed\n");
		return -EINVAL;
	}

	BEGIN_RING(sz + 1);
	OUT_RING(CP_PACKET0(reg, (sz - 1)));
	OUT_RING_TABLE(data, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

/* Emit a write of 'sz' dwords into the TCL scalar state memory starting
 * at 'start' with the given stride.  Advances cmdbuf past the data.
 * NOTE(review): unlike radeon_emit_veclinear(), sz is not checked
 * against cmdbuf->bufsz here — confirm callers guarantee enough data. */
static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.scalars.count;
	int start = header.scalars.offset;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING(3 + sz);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();
	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

/* God this is ugly
 */
/* Same as radeon_emit_scalars() but with the start index biased by
 * 0x100 (the 8-bit header field cannot express these offsets). */
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
					   drm_radeon_cmd_header_t header,
					   drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.scalars.count;
	int start = ((unsigned int)header.scalars.offset) + 0x100;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING(3 + sz);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();
	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

/* Emit a write of 'sz' dwords into the TCL vector state memory starting
 * at 'start' with the given (octword) stride.  A TCL state flush is
 * emitted first.  Advances cmdbuf past the data. */
static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.vectors.count;
	int start = header.vectors.offset;
	int stride = header.vectors.stride;
	RING_LOCALS;

	BEGIN_RING(5 + sz);
	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

/* Emit a linear (stride 1) write into TCL vector state memory.  The
 * count field is in vectors of 4 dwords; the 16-bit start address comes
 * split across two header bytes.  Advances cmdbuf past the data. */
static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.veclinear.count * 4;
	int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
	RING_LOCALS;

	if (!sz)
		return 0;
	/* sz dwords = sz * 4 bytes must be present in the kernel copy. */
	if (sz * 4 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(5 + sz);
	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
	OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}

/* Verify and emit a single type-3 packet from a client command buffer,
 * advancing cmdbuf past it.  The verifier also reports the packet's
 * total size in dwords via cmdsz. */
static int radeon_emit_packet3(struct drm_device * dev,
			       struct drm_file *file_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	unsigned int cmdsz;
	int ret;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	BEGIN_RING(cmdsz);
	OUT_RING_TABLE(cmdbuf->buf, cmdsz);
	ADVANCE_RING();

	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}

/* Verify a type-3 packet, then emit it once per cliprect (with the clip
 * rect programmed before each emission).  With no cliprects the packet
 * is skipped entirely but still consumed from cmdbuf. */
static int radeon_emit_packet3_cliprect(struct drm_device *dev,
					struct drm_file *file_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					int orig_nbox)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	unsigned int cmdsz;
	int ret;
	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	if (!orig_nbox)
		goto out;

	do {
		if (i < cmdbuf->nbox) {
			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
				return -EFAULT;
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect(). This fixes a
			 * lockup on fast machines when sending
			 * several cliprects with a cmdbuf, as when
			 * waving a 2D window over a 3D
			 * window. Something in the commands from user
			 * space seems to hang the card when they're
			 * sent several times in a row. That would be
			 * the correct place to fix it but this works
			 * around it until I can figure that out - Tim
			 * Smith */
			if (i) {
				BEGIN_RING(2);
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect(dev_priv, &box);
		}

		BEGIN_RING(cmdsz);
		OUT_RING_TABLE(cmdbuf->buf, cmdsz);
		ADVANCE_RING();

	} while (++i < cmdbuf->nbox);
	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

	/* Consume the packet from the command buffer in all cases. */
      out:
	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}

/* Emit an engine-idle wait into the ring.  'flags' must be exactly
 * RADEON_WAIT_2D, RADEON_WAIT_3D, or both ORed together; anything else
 * is rejected with -EINVAL. */
static int radeon_emit_wait(struct drm_device * dev, int flags)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%x\n", flags);

	if (flags == RADEON_WAIT_2D) {
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
	} else if (flags == RADEON_WAIT_3D) {
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
	} else if (flags == (RADEON_WAIT_2D | RADEON_WAIT_3D)) {
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_IDLE();
		ADVANCE_RING();
	} else {
		return -EINVAL;
	}

	return 0;
}

/*
 * DRM_IOCTL_RADEON_CMDBUF handler: copy a user-space command buffer
 * into the kernel, then parse it one command header at a time and
 * emit each command onto the CP ring.
 *
 * The whole buffer is copied into a kernel allocation up front so the
 * bytes that are validated are the same bytes that are later used
 * (closes the user-space modify-after-check race) and to avoid many
 * small copy-from-user calls.
 *
 * Returns 0 on success; -EINVAL on missing init, oversized buffer, or
 * any malformed command; -ENOMEM/-EFAULT if the copy-in fails.
 */
static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	int idx;
	drm_radeon_kcmd_buffer_t *cmdbuf = data;
	drm_radeon_cmd_header_t header;
	int orig_nbox, orig_bufsz;
	char *kbuf = NULL;	/* kernel-side copy of the user buffer; NULL if bufsz == 0 */

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	/* Reject absurd sizes before allocating: cap at 64KB, and guard
	 * against a negative size (bufsz is signed). */
	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
		return -EINVAL;
	}

	/* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
	 * races between checking values and using those values in other code,
	 * and simply to avoid a lot of function calls to copy in data.
	 */
	orig_bufsz = cmdbuf->bufsz;
	if (orig_bufsz != 0) {
		kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
		if (kbuf == NULL)
			return -ENOMEM;
		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
				       cmdbuf->bufsz)) {
			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
			return -EFAULT;
		}
		/* From here on, cmdbuf->buf points at kernel memory. */
		cmdbuf->buf = kbuf;
	}

	/* Remember the original cliprect count; the packet3-cliprect
	 * path consumes cmdbuf->nbox as it goes. */
	orig_nbox = cmdbuf->nbox;

	/* R300-class microcode uses an entirely separate parser. */
	if (dev_priv->microcode_version == UCODE_R300) {
		int temp;
		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);

		if (orig_bufsz != 0)
			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

		return temp;
	}

	/* microcode_version != r300: dispatch one command header at a
	 * time; each emit helper advances cmdbuf->buf/bufsz past its
	 * payload. */
	while (cmdbuf->bufsz >= sizeof(header)) {

		header.i = *(int *)cmdbuf->buf;
		cmdbuf->buf += sizeof(header);
		cmdbuf->bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets
			    (dev_priv, file_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_packets failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			/* Validate the user-supplied buffer index. */
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				goto err;
			}

			/* The buffer must belong to this client and must
			 * not already be queued to the hardware. */
			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				goto err;
			}

			radeon_cp_discard_buffer(dev, buf);
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect
			    (dev, file_priv, cmdbuf, orig_nbox)) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait(dev, header.wait.flags)) {
				DRM_ERROR("radeon_emit_wait failed\n");
				goto err;
			}
			break;
		case RADEON_CMD_VECLINEAR:
			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
			if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_veclinear failed\n");
				goto err;
			}
			break;

		default:
			DRM_ERROR("bad cmd_type %d at %p\n",
				  header.header.cmd_type,
				  cmdbuf->buf - sizeof(header));
			goto err;
		}
	}

	if (orig_bufsz != 0)
		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

	DRM_DEBUG("DONE\n");
	/* Make everything emitted above visible to the CP in one go. */
	COMMIT_RING();
	return 0;

	/* Shared error exit: free the kernel copy and report EINVAL.
	 * NOTE(review): commands emitted before the failure are not
	 * rolled back and the ring is not committed here. */
      err:
	if (orig_bufsz != 0)
		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
	return -EINVAL;
}

/*
 * DRM_IOCTL_RADEON_GETPARAM handler: look up one driver/hardware
 * parameter and copy it to the user-supplied pointer as an int.
 *
 * Returns 0 on success, -EINVAL for an unknown parameter (or missing
 * init / unavailable writeback), -EFAULT if the copy-out fails.
 */
static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param->param) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		/* Reads are counted so the driver can track how often
		 * clients poll the frame scratch register. */
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH(0);
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH(1);
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH(2);
		break;
	case RADEON_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio->offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#ifndef __LP64__
		/*
		 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
		 * pointer which can't fit into an int-sized variable.  According to
		 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
		 * not supporting it shouldn't be a problem.  If the same functionality
		 * is needed on 64-bit platforms, a new ioctl() would have to be added,
		 * so backwards-compatibility for the embedded platforms can be
		 * maintained.  --davidm 4-Feb-2004.
		 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		value = (long)dev->lock.hw_lock;
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;
	case RADEON_PARAM_SCRATCH_OFFSET:
		/* Only meaningful when scratch-register writeback to
		 * system memory is functional. */
		if (!dev_priv->writeback_works)
			return -EINVAL;
		value = RADEON_SCRATCH_REG_OFFSET;
		break;

	case RADEON_PARAM_CARD_TYPE:
		/* Bus type precedence: PCIE, then AGP, else plain PCI. */
		if (dev_priv->flags & RADEON_IS_PCIE)
			value = RADEON_CARD_PCIE;
		else if (dev_priv->flags & RADEON_IS_AGP)
			value = RADEON_CARD_AGP;
		else
			value = RADEON_CARD_PCI;
		break;
	case RADEON_PARAM_VBLANK_CRTC:
		value = radeon_vblank_crtc_get(dev);
		break;
	case RADEON_PARAM_FB_LOCATION:
		value = radeon_read_fb_location(dev_priv);
		break;
	case RADEON_PARAM_NUM_GB_PIPES:
		value = dev_priv->num_gb_pipes;
		break;
	default:
		DRM_DEBUG( "Invalid parameter %d\n", param->param );
		return -EINVAL;
	}

	/* Copy the result back as a plain int regardless of which
	 * parameter was requested. */
	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_setparam_t *sp = data;
	struct drm_radeon_driver_file_fields *radeon_priv;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (sp->param) {
	case RADEON_SETPARAM_FB_LOCATION:
		radeon_priv = file_priv->driver_priv;