/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrInOrderDrawBuffer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrPath.h"
#include "GrRenderTarget.h"
#include "GrTexture.h"
#include "GrVertexBuffer.h"
GrInOrderDrawBuffer::GrInOrderDrawBuffer(const GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : fAutoFlushTarget(NULL)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false) {

    fGpu.reset(SkRef(gpu));
    fCaps = gpu->getCaps();

    GrAssert(NULL != vertexPool);
    GrAssert(NULL != indexPool);

    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#if GR_DEBUG
    // Debug-only sentinel values so that use of an unset pool buffer/start is easy to spot.
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}

GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    this->reset();
    // This must be called before the GrDrawTarget destructor runs.
    this->releaseGeometry();
    GrSafeUnref(fAutoFlushTarget);
}

////////////////////////////////////////////////////////////////////////////////

namespace {
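// Computes the bounding rect of the positions of 'vertexCount' interleaved vertices. The position
// is assumed to be the first attribute of each vertex; 'vertexSize' is the stride in bytes
// between consecutive vertices.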
void get_vertex_bounds(const void* vertices,
                       size_t vertexSize,
                       int vertexCount,
                       SkRect* bounds) {
    GrAssert(vertexSize >= sizeof(GrPoint));
    GrAssert(vertexCount > 0);
    const GrPoint* point = static_cast<const GrPoint*>(vertices);
    bounds->fLeft = bounds->fRight = point->fX;
    bounds->fTop = bounds->fBottom = point->fY;
    for (int i = 1; i < vertexCount; ++i) {
        // Step to the next vertex by the interleaved stride, not by sizeof(GrPoint).
        point = reinterpret_cast<const GrPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
        bounds->growToInclude(point->fX, point->fY);
    }
}
}  // namespace

void GrInOrderDrawBuffer::drawRect(const GrRect& rect,
                                   const SkMatrix* matrix,
                                   const GrRect* srcRects[],
                                   const SkMatrix* srcMatrices[]) {

    GrVertexLayout layout = 0;
    GrDrawState::AutoColorRestore acr;
    GrColor color = this->drawState()->getColor();

    // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing
    // only in color is a common occurrence in tables.) However, having per-vertex colors disables
    // blending optimizations because we don't know if the color will be solid or not. These
    // optimizations help determine whether coverage and color can be blended correctly when
    // dual-source blending isn't available. This comes into play when there is coverage. If colors
    // were a stage it could take a hint that every vertex's color will be opaque.
    if (this->getCaps().dualSourceBlendingSupport() ||
        this->getDrawState().hasSolidCoverage(this->getGeomSrc().fVertexLayout)) {
        layout |= GrDrawState::kColor_VertexLayoutBit;
        // We set the draw state's color to white here. This is done so that any batching performed
        // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
        // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
        // constant color in its op== when the kColor layout bit is set and then we can remove this.
        acr.set(this->drawState(), 0xFFFFFFFF);
    }

    uint32_t explicitCoordMask = 0;
    if (NULL != srcRects) {
        for (int s = 0; s < GrDrawState::kNumStages; ++s) {
            int numTC = 0;
            if (NULL != srcRects[s]) {
                layout |= GrDrawState::StageTexCoordVertexLayoutBit(s, numTC);
                ++numTC;
                explicitCoordMask |= (1 << s);
            }
        }
    }

    AutoReleaseGeometry geo(this, layout, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes.
    SkMatrix combinedMatrix;
    if (NULL != matrix) {
        combinedMatrix = *matrix;
    } else {
        combinedMatrix.reset();
    }
    combinedMatrix.postConcat(this->drawState()->getViewMatrix());
    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise, if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
    GrDrawState::AutoDeviceCoordDraw adcd(this->drawState(), explicitCoordMask);
    if (!adcd.succeeded()) {
        return;
    }

    int stageOffsets[GrDrawState::kNumStages], colorOffset;
    int vsize = GrDrawState::VertexSizeAndOffsetsByStage(layout, stageOffsets,
                                                         &colorOffset, NULL, NULL);
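    // For illustration: with the color bit and one explicit-coord stage set, each interleaved
    // vertex written below looks roughly like the following (a sketch; the actual offsets come
    // from stageOffsets[] and colorOffset, not from a fixed struct):
    //     struct RectVertex {
    //         GrPoint fPosition;    // always at offset 0
    //         GrPoint fStageCoord;  // at stageOffsets[s] for each explicit stage s
    //         GrColor fColor;       // at colorOffset, if colorOffset >= 0
    //     };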

    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
    combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);

    SkRect devBounds;
    // Since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);

    for (int i = 0; i < GrDrawState::kNumStages; ++i) {
        if (explicitCoordMask & (1 << i)) {
            GrAssert(0 != stageOffsets[i]);
            GrPoint* coords = GrTCast<GrPoint*>(GrTCast<intptr_t>(geo.vertices()) +
                                                stageOffsets[i]);
            coords->setRectFan(srcRects[i]->fLeft, srcRects[i]->fTop,
                               srcRects[i]->fRight, srcRects[i]->fBottom,
                               vsize);
            if (NULL != srcMatrices && NULL != srcMatrices[i]) {
                srcMatrices[i]->mapPointsWithStride(coords, vsize, 4);
            }
        } else {
            GrAssert(0 == stageOffsets[i]);
        }
    }

    if (colorOffset >= 0) {
        GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
        for (int i = 0; i < 4; ++i) {
            *vertColor = color;
            vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(vertColor) + vsize);
        }
    }

    this->setIndexSourceToBuffer(fGpu->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);
}
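// Returns true if 'devBounds' is known to lie entirely inside the current clip, in which case
// per-draw clipping can be skipped for the draw.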
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember it in fClipProxy. It is common for an edge (or
            // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
            // free via the viewport. We don't want to think that clipping must be enabled in this
            // case. So we extend the clip outward from the edge to avoid these false negatives.
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::MakeFromIRect(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}
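// Attempts to extend the most recently recorded instanced draw to also cover 'info'. Returns the
// number of instances that were concatenated (possibly zero).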
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    GrAssert(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();

    // We only attempt to concat when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check that the last recorded command is a compatible draw that uses the same VB from the
    // pool and the same IB.
    if (kDraw_Cmd != fCmds.back()) {
        return 0;
    }

    DrawRecord* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->fVertexBuffer != vertexBuffer ||
        draw->fIndexBuffer != geomSrc.fIndexBuffer ||
        draw->fVertexLayout != geomSrc.fVertexLayout) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    GrAssert(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount() -
                                           info.startVertex());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = GrMin(instancesToConcat, info.instanceCount());
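    // E.g. if the current index source holds enough indices for 256 instances and the previous
    // draw already covers 200, at most 56 more can be appended, further clamped to info's
    // instance count. (Numbers here are purely illustrative.)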

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         GrDrawState::VertexSize(draw->fVertexLayout);
    poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);
    return instancesToConcat;
}
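// RAII helper that disables the clip on a draw state and re-enables it when the helper goes out
// of scope. Used to skip clipping for draws whose bounds are known to be inside the clip.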
class AutoClipReenable {
public:
    AutoClipReenable() : fDrawState(NULL) {}
    ~AutoClipReenable() {
        if (NULL != fDrawState) {
            fDrawState->enableState(GrDrawState::kClip_StateBit);
        }
    }
    void set(GrDrawState* drawState) {
        if (drawState->isClipState()) {
            fDrawState = drawState;
            drawState->disableState(GrDrawState::kClip_StateBit);
        }
    }
private:
    GrDrawState* fDrawState;
};

void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    AutoClipReenable acr;

    if (this->getDrawState().isClipState() &&
        NULL != info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
        this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }

    DrawRecord* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            draw = this->recordDraw(info);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            return;
        }
    } else {
        draw = this->recordDraw(info);
    }
    draw->fVertexLayout = this->getVertexLayout();

    switch (this->getGeomSrc().fVertexSrc) {
        case kBuffer_GeometrySrcType:
            draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
            break;
        case kReserved_GeometrySrcType: // fallthrough
        case kArray_GeometrySrcType: {
            size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
                                 GrDrawState::VertexSize(draw->fVertexLayout);
            poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
            draw->fVertexBuffer = poolState.fPoolVertexBuffer;
            draw->adjustStartVertex(poolState.fPoolStartVertex);
            break;
        }
        default:
            GrCrash("unknown geom src type");
    }
    draw->fVertexBuffer->ref();

    if (info.isIndexed()) {
        switch (this->getGeomSrc().fIndexSrc) {
            case kBuffer_GeometrySrcType:
                draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
                break;
            case kReserved_GeometrySrcType: // fallthrough
            case kArray_GeometrySrcType: {
                size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
                poolState.fUsedPoolIndexBytes = GrMax(poolState.fUsedPoolIndexBytes, indexBytes);
                draw->fIndexBuffer = poolState.fPoolIndexBuffer;
                draw->adjustStartIndex(poolState.fPoolStartIndex);
                break;
            }
            default:
                GrCrash("unknown geom src type");
        }
        draw->fIndexBuffer->ref();
    } else {
        draw->fIndexBuffer = NULL;
    }
}

GrInOrderDrawBuffer::StencilPath::StencilPath() : fStroke(SkStrokeRec::kFill_InitStyle) {}

void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, const SkStrokeRec& stroke,
                                        SkPath::FillType fill) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path stenciling?
    if (this->needsNewState()) {
        this->recordState();
    }
    StencilPath* sp = this->recordStencilPath();
    sp->fPath.reset(path);
    path->ref();
    sp->fFill = fill;
    sp->fStroke = stroke;
}

void GrInOrderDrawBuffer::clear(const GrIRect* rect, GrColor color, GrRenderTarget* renderTarget) {
    GrIRect r;
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        GrAssert(NULL != renderTarget);
    }
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to
        // the current render target. If we get that smart we have to make sure
        // those draws aren't read before this clear (render-to-texture).
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = this->recordClear();
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fRenderTarget = renderTarget;
    renderTarget->ref();
}

void GrInOrderDrawBuffer::reset() {
    GrAssert(1 == fGeoPoolStateStack.count());
    this->resetVertexSource();
    this->resetIndexSource();
    int numDraws = fDraws.count();
    for (int d = 0; d < numDraws; ++d) {
        // we always have a VB, but not always an IB
        GrAssert(NULL != fDraws[d].fVertexBuffer);
        fDraws[d].fVertexBuffer->unref();
        GrSafeUnref(fDraws[d].fIndexBuffer);
    }
    fCmds.reset();
    fDraws.reset();
    fStencilPaths.reset();
    fStates.reset();
    fClears.reset();
    fVertexPool.reset();
    fIndexPool.reset();
    fClips.reset();
    fClipOrigins.reset();
    fClipSet = true;
}

bool GrInOrderDrawBuffer::flushTo(GrDrawTarget* target) {
    GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    GrAssert(NULL != target);
    GrAssert(target != this); // flushing the buffer into itself is not supported

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return false;
    }

    fVertexPool.unlock();
    fIndexPool.unlock();

    GrDrawTarget::AutoClipRestore acr(target);
    AutoGeometryPush agp(target);

    GrDrawState playbackState;
    GrDrawState* prevDrawState = target->drawState();
    prevDrawState->ref();
    target->setDrawState(&playbackState);

    GrClipData clipData;

    int currState = 0;
    int currClip = 0;
    int currClear = 0;
    int currDraw = 0;
    int currStencilPath = 0;

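    // Replay the recorded commands in order. fCmds gives the command sequence; each command type
    // consumes the next entry from its own payload array (fDraws, fStates, fClips, etc.).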
    for (int c = 0; c < numCmds; ++c) {
        switch (fCmds[c]) {
            case kDraw_Cmd: {
                const DrawRecord& draw = fDraws[currDraw];
                target->setVertexSourceToBuffer(draw.fVertexLayout, draw.fVertexBuffer);
                if (draw.isIndexed()) {
                    target->setIndexSourceToBuffer(draw.fIndexBuffer);
                }
                target->executeDraw(draw);

                ++currDraw;
                break;
            }
            case kStencilPath_Cmd: {
                const StencilPath& sp = fStencilPaths[currStencilPath];
                target->stencilPath(sp.fPath.get(), sp.fStroke, sp.fFill);
                ++currStencilPath;
                break;
            }
            case kSetState_Cmd:
                fStates[currState].restoreTo(&playbackState);
                ++currState;
                break;
            case kSetClip_Cmd:
                clipData.fClipStack = &fClips[currClip];
                clipData.fOrigin = fClipOrigins[currClip];
                target->setClip(&clipData);
                ++currClip;
                break;
            case kClear_Cmd:
                target->clear(&fClears[currClear].fRect,
                              fClears[currClear].fColor,
                              fClears[currClear].fRenderTarget);
                ++currClear;
                break;
        }
    }
    // we should have consumed all the states, clips, etc.
    GrAssert(fStates.count() == currState);
    GrAssert(fClips.count() == currClip);
    GrAssert(fClipOrigins.count() == currClip);
    GrAssert(fClears.count() == currClear);
    GrAssert(fDraws.count() == currDraw);

    target->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    return true;
}

void GrInOrderDrawBuffer::setAutoFlushTarget(GrDrawTarget* target) {
    GrSafeAssign(fAutoFlushTarget, target);
}

void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(size_t vertexSize,
                                                         int vertexCount,
                                                         int indexCount) {
    if (NULL != fAutoFlushTarget) {
        // We use geometryHints() to know whether to flush the draw buffer. We
        // can't flush if we are inside an unbalanced pushGeometrySource.
        // Moreover, flushing blows away vertex and index data that was
        // previously reserved. So if the vertex or index data is pulled from
        // reserved space and won't be released by this request then we can't
        // flush.
        bool insideGeoPush = fGeoPoolStateStack.count() > 1;

        bool unreleasedVertexSpace =
            !vertexCount &&
            kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;

        bool unreleasedIndexSpace =
            !indexCount &&
            kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;

        // We don't want to finalize any reserved geom on the target since
        // we don't know that the client has finished writing to it.
        bool targetHasReservedGeom =
            fAutoFlushTarget->hasReservedVerticesOrIndices();

        int vcount = vertexCount;
        int icount = indexCount;

        if (!insideGeoPush &&
            !unreleasedVertexSpace &&
            !unreleasedIndexSpace &&
            !targetHasReservedGeom &&
            this->geometryHints(vertexSize, &vcount, &icount)) {

            this->flushTo(fAutoFlushTarget);
        }
    }
}

bool GrInOrderDrawBuffer::geometryHints(size_t vertexSize,
                                        int* vertexCount,
                                        int* indexCount) const {
    // We will recommend a flush if the data could fit in a single
    // preallocated buffer but none are left and it can't fit
    // in the current buffer (which may not be preallocated).
    bool flush = false;
    if (NULL != indexCount) {
        int32_t currIndices = fIndexPool.currentBufferIndices();
        if (*indexCount > currIndices &&
            (!fIndexPool.preallocatedBuffersRemaining() &&
             *indexCount <= fIndexPool.preallocatedBufferIndices())) {

            flush = true;
        }
        *indexCount = currIndices;
    }
    if (NULL != vertexCount) {
        int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize);
        if (*vertexCount > currVertices &&
            (!fVertexPool.preallocatedBuffersRemaining() &&
             *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) {

            flush = true;
        }
        *vertexCount = currVertices;
    }
    return flush;
}
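// A sketch of how a caller is expected to use geometryHints() (compare
// willReserveVertexAndIndexSpace() above): pass in the desired counts, and on return the counts
// are overwritten with the capacity of the current pool buffers.
//     int vcount = desiredVertexCount;   // hypothetical caller variables
//     int icount = desiredIndexCount;
//     if (buffer->geometryHints(vertexSize, &vcount, &icount)) {
//         // flushing now would let the data land in a preallocated buffer
//     }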

bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
                                               int vertexCount,
                                               void** vertices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(vertexCount > 0);
    GrAssert(NULL != vertices);
    GrAssert(0 == poolState.fUsedPoolVertexBytes);

    *vertices = fVertexPool.makeSpace(vertexSize,
                                      vertexCount,
                                      &poolState.fPoolVertexBuffer,
                                      &poolState.fPoolStartVertex);
    return NULL != *vertices;
}

bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(indexCount > 0);
    GrAssert(NULL != indices);
    GrAssert(0 == poolState.fUsedPoolIndexBytes);

    *indices = fIndexPool.makeSpace(indexCount,
                                    &poolState.fPoolIndexBuffer,
                                    &poolState.fPoolStartIndex);
    return NULL != *indices;
}

void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release vertex space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    GrAssert(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
             kArray_GeometrySrcType == geoSrc.fVertexSrc);

    // When the caller reserved vertex buffer space we gave it back a pointer
    // provided by the vertex buffer pool. At each draw we tracked the largest
    // offset into the pool's pointer that was referenced. Now we return to the
    // pool any portion at the tail of the allocation that no draw referenced.
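    // E.g. if 100 vertices were reserved but the recorded draws only referenced the first 60,
    // the bytes for the trailing 40 vertices are handed back to the pool. (Numbers are
    // illustrative.)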
    size_t reservedVertexBytes = GrDrawState::VertexSize(geoSrc.fVertexLayout) *
                                 geoSrc.fVertexCount;
    fVertexPool.putBack(reservedVertexBytes -
                        poolState.fUsedPoolVertexBytes);
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fPoolVertexBuffer = NULL;
    poolState.fPoolStartVertex = 0;
}

void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release index space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    GrAssert(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
             kArray_GeometrySrcType == geoSrc.fIndexSrc);

    // Similar to releaseReservedVertexSpace we return any unused portion at
    // the tail.
    size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
    fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
    poolState.fUsedPoolIndexBytes = 0;
    poolState.fPoolIndexBuffer = NULL;
    poolState.fPoolStartIndex = 0;
}

void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
                                                   int vertexCount) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(0 == poolState.fUsedPoolVertexBytes);
#if GR_DEBUG
    bool success =
#endif
    fVertexPool.appendVertices(GrDrawState::VertexSize(this->getVertexLayout()),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(0 == poolState.fUsedPoolIndexBytes);
#if GR_DEBUG
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handle it
    // by copying the array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}

void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handle it
    // by copying the array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}

void GrInOrderDrawBuffer::geometrySourceWillPush() {
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#if GR_DEBUG
    // Debug-only sentinel values, as in the constructor.
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
}

void GrInOrderDrawBuffer::geometrySourceWillPop(
                                        const GeometrySrcState& restoredState) {
    GrAssert(fGeoPoolStateStack.count() > 1);
    fGeoPoolStateStack.pop_back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    // We have to assume that any slack we had in our vertex/index data
    // is now unreleasable because data may have been appended later in the
    // pool.
    if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
        kArray_GeometrySrcType == restoredState.fVertexSrc) {
        poolState.fUsedPoolVertexBytes =
            GrDrawState::VertexSize(restoredState.fVertexLayout) *
            restoredState.fVertexCount;
    }
    if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
        kArray_GeometrySrcType == restoredState.fIndexSrc) {
        poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
                                        restoredState.fIndexCount;
    }
}

bool GrInOrderDrawBuffer::needsNewState() const {
    return fStates.empty() || !fStates.back().isEqual(this->getDrawState());
}

bool GrInOrderDrawBuffer::needsNewClip() const {
    GrAssert(fClips.count() == fClipOrigins.count());
    if (this->getDrawState().isClipState()) {
        if (fClipSet &&
            (fClips.empty() ||
             fClips.back() != *this->getClip()->fClipStack ||
             fClipOrigins.back() != this->getClip()->fOrigin)) {
            return true;
        }
    }
    return false;
}

void GrInOrderDrawBuffer::recordClip() {
    fClips.push_back() = *this->getClip()->fClipStack;
    fClipOrigins.push_back() = this->getClip()->fOrigin;
    fClipSet = false;
    fCmds.push_back(kSetClip_Cmd);
}

void GrInOrderDrawBuffer::recordState() {
    fStates.push_back().saveFrom(this->getDrawState());
    fCmds.push_back(kSetState_Cmd);
}

GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
    fCmds.push_back(kDraw_Cmd);
    return &fDraws.push_back(info);
}

GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
    fCmds.push_back(kStencilPath_Cmd);
    return &fStencilPaths.push_back();
}

GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
    fCmds.push_back(kClear_Cmd);
    return &fClears.push_back();
}

void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
    INHERITED::clipWillBeSet(newClipData);
    fClipSet = true;
    fClipProxyState = kUnknown_ClipProxyState;
}