Accelerate 개요
vDSP: 신호 처리 라이브러리
배열에 대한 기초 연산(Add, subtract, multiply, conversion 등)
이산 푸리에 변환(DFT), 이산 코사인 변환(DCT)
convolution(합성곱), correlation(상관성)
예시 - 노이즈 제거
// Signal transform: forward DCT (Type II) of the noisy input signal.
let dctSetup_FORWARD: vDSP_DFT_Setup = {
    // nil previous setup -> create a fresh one for `numSamples` elements.
    guard let dctSetup = vDSP_DCT_CreateSetup(
        nil, vDSP_Length(numSamples), .II) else {
        fatalError("can't create FORWARD vDSP_DFT_Setup")
    }
    return dctSetup
}()
var forwardDCT = [Float](repeating: 0,
                         count: numSamples)
vDSP_DCT_Execute(dctSetup_FORWARD, noisySignalReal, &forwardDCT)

// Filter on a threshold: coefficients below `threshold` are clipped,
// suppressing low-magnitude (noise) frequency components.
vDSP_vthres(forwardDCT, stride, &threshold, &forwardDCT, stride, count)

// Signal reconstruction: inverse DCT (Type III is the inverse of Type II).
let dctSetup_INVERSE: vDSP_DFT_Setup = {
    guard let dctSetup = vDSP_DCT_CreateSetup(
        nil, vDSP_Length(numSamples), .III) else {
        fatalError("can't create INVERSE vDSP_DFT_Setup")
    }
    return dctSetup
}()
vDSP_DCT_Execute(dctSetup_INVERSE, forwardDCT, &inverseDCT)

// Normalization: a forward + inverse DCT round trip scales every element
// by the element count, so divide it back out.
var divisor = Float(count)
vDSP_vsdiv(inverseDCT, stride, &divisor, &inverseDCT, stride, count)
예시 - halftone descreening
// FFT setup sized for a 1024x1024 image (log2n = 20), radix-2.
let fftSetUp: FFTSetup = {
    let log2n = vDSP_Length(log2(1024.0 * 1024.0))
    guard let fftSetUp = vDSP_create_fftsetup(log2n, FFTRadix(kFFTRadix2)) else {
        fatalError("can't create FFT Setup")
    }
    return fftSetUp
}()

// Split-complex buffer receiving the frequency-domain result.
// NOTE(review): realp originally pointed at `sourceImage_floatPixelsReal_spatial`,
// which mismatches the `_frequency` imagp plane — assuming the frequency-domain
// real plane was intended; confirm against the full listing.
let sourceImage_floatPixels_frequency = DSPSplitComplex(
    realp: &sourceImage_floatPixelsReal_frequency,
    imagp: &sourceImage_floatPixelsImag_frequency)

// Forward 2-D real-to-complex FFT (row stride 1; 0 column stride means
// the function derives it from the row length).
vDSP_fft2d_zrop(fftSetUp, &sourceImageSplitComplex, vDSP_Stride(1), vDSP_Stride(0),
                &sourceImage_floatPixels_frequency, vDSP_Stride(1), vDSP_Stride(0),
                vDSP_Length(log2(Float(width))),
                vDSP_Length(log2(Float(height))),
                FFTDirection(kFFTDirection_Forward))

// Even-indexed pixels hold the real parts, odd-indexed pixels the imaginary parts.
// NOTE(review): `floatPixelsImage_spatial` in the original looks like a typo of
// the Imag plane name used elsewhere — confirm the declaration.
var floatPixels_spatial = DSPSplitComplex(realp: &floatPixelsReal_spatial,
                                          imagp: &floatPixelsImag_spatial)

// Inverse 2-D FFT back into the spatial domain after frequency-domain filtering.
vDSP_fft2d_zrop(fftSetUp, &sourceImage_floatPixels_frequency,
                stride, 0,
                &floatPixels_spatial,
                stride, 0,
                vDSP_Length(log2(Float(width))),
                vDSP_Length(log2(Float(height))),
                FFTDirection(kFFTDirection_Inverse))
simd
// Component-wise vector arithmetic: midpoint of two 4-component vectors.
let x = simd_float4(1, 2, 3, 4)
let y = simd_float4(3, 3, 3, 3)
let z = 0.5 * (x + y)

// Rotate a unit vector by composing two quaternion rotations.
let original = simd_float3(0, 0, 1)
let quaternion = simd_quatf(angle: .pi / -3,
                            axis: simd_float3(1, 0, 0))
let quaternion2 = simd_quatf(angle: .pi / 3,
                             axis: simd_float3(0, 1, 0))
// Quaternion multiplication is not commutative: order matters.
let quaternion3 = quaternion2 * quaternion
let rotatedVector = simd_act(quaternion3, original)
// Slerp interpolation -> produces sharp direction changes at the keyframes.
// (`...` are placeholders for concrete quaternion values in the original demo.)
let blue = simd_quatf(...)
let green = simd_quatf(...)
let red = simd_quatf(...)
// Interpolate along the shortest arc between `blue` and `green`.
for t: Float in stride(from: 0, to: 1, by: 0.001) {
let q = simd_slerp(blue, green, t)
// Add a line segment at the point obtained from q.act(original).
}
// Interpolate along the LONGEST arc between `green` and `red`.
for t: Float in stride(from: 0, to: 1, by: 0.001) {
let q = simd_slerp_longest(green, red, t)
// Add a line segment at the point obtained from q.act(original).
}
// Spline interpolation -> smooth direction changes across keyframes.
let original = simd_float3(0, 0, 1)
// Placeholder for the keyframe rotations.
// NOTE(review): the `1 ... rotations.count - 3` range below requires
// rotations.count >= 4, otherwise the range is invalid and traps — confirm
// the caller guarantees this.
let rotations: [simd_quatf] = ...
for i in 1 ... rotations.count - 3 {
for t: Float in stride(from: 0, to: 1, by: 0.001) {
// Each segment interpolates between rotations[i] and rotations[i+1],
// using the neighbors [i-1] and [i+2] as spline control points.
let q = simd_spline(rotations[i-1],
rotations[i],
rotations[i+1],
rotations[i+2],
t)
// Add a line segment at the point obtained from q.act(original).
}
}
vImage
워크플로우
이펙트 예시 - color saturation
// Effect example - color saturation via a 1x1 matrix multiply on a planar buffer.
// preBias is added to each source pixel before the multiply.
var preBias: Int16 = -128
// Convert to fixed point (Q12): scale by 2^12 so the fractional saturation
// factor can be represented in the Int16 matrix.
let divisor: Int32 = 0x1000 // 2^12
// postBias is added after the multiply, before dividing by `divisor`.
var postBias: Int32 = 128 * divisor
var matrix = [ Int16(saturation * Float(divisor)) ]
vImageMatrixMultiply_Planar8(&source, &destinations, 1, 1,
&matrix, divisor, &preBias, &postBias,
vImage_Flags(kvImageNoFlags))
// AVCaptureVideoDataOutputSampleBufferDelegate: called once per camera frame.
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
// Get the CVImageBuffer from the CMSampleBuffer.
// `imageBuffer` is optional — skip the frame instead of force-unwrapping.
guard let pixelBuffer = sampleBuffer.imageBuffer else { return }
// Let the CPU access this memory region.
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
// Hand the memory back to the camera on every exit path.
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
// Apply vImage processing here.
...
}
// Prepare the vImage input buffer for the luminance (Y) plane.
// The vImage_Buffer wraps the CVPixelBuffer plane's memory without copying.
let lumaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0)
let lumaWidth = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0)
let lumaHeight = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0)
let lumaRowBytes = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)
var sourceLumaBuffer = vImage_Buffer(data: lumaBaseAddress,
height: vImagePixelCount(lumaHeight),
width: vImagePixelCount(lumaWidth),
rowBytes: lumaRowBytes)
// Prepare the vImage input buffer for the chrominance (CbCr) plane.
// ..
// Prepare the output buffer.
var destinationBuffer = vImage_Buffer()
// Pre-allocate the destination memory sized to the luma plane and pixel format.
vImageBuffer_Init(&destinationBuffer,
sourceLumaBuffer.height, sourceLumaBuffer.width,
cgImageFormat.bitsPerPixel, vImage_Flags(kvImageNoFlags))
// Convert biplanar YpCbCr (4:2:0) to interleaved ARGB8888; 255 = opaque alpha.
vImageConvert_420Yp8_CbCr8ToARGB8888(&sourceLumaBuffer, &sourceChromaBuffer,
&destinationBuffer, &infoYpCbCrToARGB,
nil, 255, vImage_Flags(kvImageNoFlags))
// Create a CGImage without copying the image buffer.
let cgImage = vImageCreateCGImageFromBuffer(&destinationBuffer, &cgImageFormat,
nil, nil, vImage_Flags(kvImageNoFlags),
&error)
if let cgImage = cgImage, error == kvImageNoError {
// UI work goes back to the main queue; takeRetainedValue() balances the
// +1 retain returned by vImageCreateCGImageFromBuffer.
DispatchQueue.main.async {
self.imageView.image = UIImage(cgImage: cgImage.takeRetainedValue())
}
}
Rotation
// Opaque white fill for the area uncovered by the rotation (ARGB byte order).
let backColor: [UInt8] = [255, 255, 255, 255]
// NOTE(review): source and destination are the same buffer here — confirm
// vImageRotate_ARGB8888 supports in-place operation for this use.
vImageRotate_ARGB8888(&destinationBuffer, &destinationBuffer, nil,
fxValue, backColor, vImage_Flags(kvImageBackgroundColorFill))
Blur
// Tent (triangular-weight) blur at offset (0, 0).
// NOTE(review): vImage convolution kernel dimensions must be odd — confirm
// kernelSize is constrained accordingly at its declaration.
// kvImageEdgeExtend replicates edge pixels so the kernel can read past borders.
vImageTentConvolve_ARGB8888(&tmpBuffer, &destinationBuffer, nil,
0, 0, kernelSize, kernelSize, nil,
vImage_Flags(kvImageEdgeExtend))
Dither
// Reduce the 8-bit luma plane to 1 bit per pixel using Atkinson dithering.
vImageConvert_Planar8toPlanar1(&sourceLumaBuffer,
&ditheredLuma,
nil,
Int32(kvImageConvert_DitherAtkinson),
vImage_Flags(kvImageNoFlags))
Color quantization
// Build a 256-entry lookup table that snaps each 8-bit value down to a
// multiple of the quantization level (integer division truncates).
// NOTE(review): `qualtizationLevel` looks like a typo of `quantizationLevel`;
// its declaration is outside this excerpt, so the spelling is kept as-is.
var lookUpTable = (0...255).map {
return Pixel_8(($0/qualtizationLevel) * qualtizationLevel)
}
// Apply the same table to the R, G, and B channels in place
// (nil alpha table leaves the alpha channel unchanged).
vImageTableLookUp_ARGB8888(&destinationBuffer, &destinationBuffer,
nil, &lookUpTable, &lookUpTable, &lookUpTable,
vImage_Flags(kvImageNoFlags))
LINPACK 벤치마크