Plaster
New
List
Login
common-lisp
default
shinmera
2023.06.30 19:14:05
;; After each simulation step, sort the emitter's alive particles on the
;; GPU. This is a port of the C++ bitonic merge-sort dispatch loop in the
;; c++src paste below: runs of 512 elements are sorted directly by
;; SORT-PASS, then progressively merged by SORT-STEP-PASS and
;; SORT-INNER-PASS, doubling the sorted run length each round until it
;; covers MAX-PARTICLES.
(defmethod simulate-particles :after ((emitter sorted-particle-emitter))
  (with-all-slots-bound (emitter sorted-particle-emitter)
    (let* ((max-particles (max-particles emitter))
           ;; One thread group per 512 elements: ceiling(max-particles/512).
           ;; (The C++ original notes a group size of 256 threads, 2
           ;; elements per thread.)
           (thread-groups (1+ (ash (1- max-particles) -9)))
           ;; Live particle count = capacity minus the dead counter read
           ;; back from the GPU counter buffer.
           (alive (with-buffer-tx (struct particle-counter-buffer :update :read)
                    (- (max-particles emitter) (slot-value struct 'dead-count))))
           ;; After the initial pass, runs of 512 elements are sorted.
           (presorted 512))
      (setf (slot-value sort-pass 'elements) alive)
      ;; Initial sort of all 512-element runs, groups sized by the ALIVE
      ;; count. NOTE(review): if ALIVE is 0 this dispatches (vec 0 1 1) --
      ;; confirm the renderer tolerates a zero-sized dispatch.
      (render sort-pass (vec (1+ (ash (1- alive) -9)) 1 1))
      ;; A single group means the whole buffer is already sorted; otherwise
      ;; the presorted runs still have to be merged.
      (when (< 1 thread-groups)
        (setf (slot-value sort-step-pass 'elements) alive)
        (setf (slot-value sort-inner-pass 'elements) alive)
        ;; NOTE: the FOR clause (no THEN) re-evaluates on every iteration,
        ;; resetting THREAD-GROUPS to 0 each round and shadowing the LET*
        ;; binding above -- this mirrors the inner numThreadGroups variable
        ;; in the C++ original.
        (loop with done = NIL
              for thread-groups = 0
              until done
              do (setf done T)
                 (when (< presorted max-particles)
                   ;; One more round is needed unless doubling the run size
                   ;; already covers the whole buffer.
                   (when (< (* presorted 2) max-particles)
                     (setf done NIL))
                   ;; Set number of thread groups to fit. This is imo done real dumb in the
                   ;; original code. Why not just a log?
                   ;; (Rounds MAX-PARTICLES up to the next power of two >=
                   ;; PRESORTED, then divides by 512.)
                   (let ((pow2 presorted))
                     (loop while (< pow2 max-particles)
                           do (setf pow2 (* pow2 2)))
                     (setf thread-groups (ash pow2 -9))))
                 ;; Merge steps, halving the sub-sequence size each step;
                 ;; once a sub-sequence fits in a single thread group
                 ;; (<= 256) the loop stops and SORT-INNER-PASS finishes
                 ;; the round in one dispatch below.
                 (loop with merge-size = (* presorted 2)
                       for sub-size = (ash merge-size -1) then (ash sub-size -1)
                       while (< 256 sub-size)
                       ;; job-params components are filled exactly like the
                       ;; C++ SortConstants below: x = sub-size, y/z differ
                       ;; for the first (descending) step of a merge vs the
                       ;; following (ascending) steps, w unused.
                       do (setf (slot-value sort-step-pass 'job-params)
                                (if (= sub-size (ash merge-size -1))
                                    (vec sub-size (1- (* sub-size 2)) -1 0)
                                    (vec sub-size sub-size +1 0)))
                          (render sort-step-pass (vec thread-groups 1 1)))
                 (render sort-inner-pass (vec thread-groups 1 1))
                 ;; Sorted run length doubles every round.
                 (setf presorted (* presorted 2)))))))
Raw
Annotate
Repaste
Annotations
c++src
default
shinmera
2023.06.30 19:14:23
// Reference original of the common-lisp port pasted above: the GPU bitonic
// merge-sort dispatch for emitted particles. NOTE(review): this looks like
// Wicked Engine's emitted-particle sorting (which itself derives from AMD's
// GPU sort sample) -- confirm provenance. The fragment is incomplete: the
// enclosing `if (SORTING) {` brace is never closed in this paste.
if (SORTING)
{
	device->EventBegin("SortEmittedParticles", threadID);

	// initialize sorting arguments:
	device->BindCS(kickoffSortCS, threadID);
	device->Dispatch(1, 1, 1, threadID);

	// initial sorting:
	bool bDone = true;

	// calculate how many threads we'll require:
	//   we'll sort 512 elements per CU (threadgroupsize 256)
	//   maybe need to optimize this or make it changeable during init
	//   TGS=256 is a good intermediate value
	unsigned int numThreadGroups = ((MAX_PARTICLES - 1) >> 9) + 1;

	// more than one group means the 512-element runs must still be merged
	if (numThreadGroups > 1) bDone = false;

	// sort all buffers of size 512 (and presort bigger ones)
	// (thread-group count comes from the GPU-side alive count, hence the
	// indirect dispatch)
	device->BindCS(sortCS, threadID);
	device->DispatchIndirect(indirectBuffers, ARGUMENTBUFFER_OFFSET_DISPATCHSORT, threadID);

	int presorted = 512;
	while (!bDone)
	{
		bDone = true;
		device->BindCS(sortStepCS, threadID);

		// prepare thread group description data
		// (shadows the outer numThreadGroups; reset to 0 every round)
		unsigned int numThreadGroups = 0;

		if (MAX_PARTICLES > (uint32_t)presorted)
		{
			// another round is needed unless doubling the presorted run
			// size already covers the whole buffer
			if (MAX_PARTICLES > (uint32_t)presorted * 2) bDone = false;

			// round MAX_PARTICLES up to the next power of two >= presorted,
			// then divide by 512 to get the group count
			unsigned int pow2 = presorted;
			while (pow2 < MAX_PARTICLES) pow2 *= 2;
			numThreadGroups = pow2 >> 9;
		}

		// merge steps, halving the sub-size until one sub-sequence fits in
		// a single thread group; sortInnerCS finishes the round below
		unsigned int nMergeSize = presorted * 2;
		for (unsigned int nMergeSubSize = nMergeSize >> 1; nMergeSubSize > 256; nMergeSubSize = nMergeSubSize >> 1)
		// for( int nMergeSubSize=nMergeSize>>1; nMergeSubSize>0; nMergeSubSize=nMergeSubSize>>1 )
		{
			// x = sub size; y/z differ between the first step of a merge
			// and the following steps; w unused
			SortConstants sc;
			sc.job_params.x = nMergeSubSize;
			if (nMergeSubSize == nMergeSize >> 1)
			{
				sc.job_params.y = (2 * nMergeSubSize - 1);
				sc.job_params.z = -1;
			}
			else
			{
				sc.job_params.y = nMergeSubSize;
				sc.job_params.z = 1;
			}
			sc.job_params.w = 0;

			device->UpdateBuffer(sortCB, &sc, threadID);
			device->Dispatch(numThreadGroups, 1, 1, threadID);
		}

		device->BindCS(sortInnerCS, threadID);
		device->Dispatch(numThreadGroups, 1, 1, threadID);

		// sorted run length doubles every round
		presorted *= 2;
	}
Raw
Repaste