// Configure an AVFoundation capture pipeline that scans for QR codes and
// renders a full-screen camera preview into self.view's layer.
NSError *error;
self.captureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
self.input = [AVCaptureDeviceInput deviceInputWithDevice:self.captureDevice error:&error];
// Cocoa error contract: check the returned value, not the error pointer —
// `error` is only meaningful when the call actually failed.
if (!self.input) {
    NSLog(@"Failed to create capture input: %@", error.localizedDescription);
    return;
}
self.captureSession = [[AVCaptureSession alloc] init];
self.captureMetadataOutput = [[AVCaptureMetadataOutput alloc] init];
// Guard with canAdd… so an unsupported configuration degrades gracefully
// instead of raising an exception.
if ([self.captureSession canAddInput:self.input]) {
    [self.captureSession addInput:self.input];
}
if ([self.captureSession canAddOutput:self.captureMetadataOutput]) {
    [self.captureSession addOutput:self.captureMetadataOutput];
}
// Deliver metadata callbacks on the main queue and restrict detection to QR
// codes. metadataObjectTypes must be set *after* the output joins the session,
// otherwise the available types list is empty.
[self.captureMetadataOutput setMetadataObjectsDelegate:self queue:dispatch_get_main_queue()];
[self.captureMetadataOutput setMetadataObjectTypes:@[AVMetadataObjectTypeQRCode]];
AVCaptureVideoPreviewLayer *layer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
[layer setVideoGravity:AVLayerVideoGravityResizeAspectFill];
[layer setFrame:self.view.layer.bounds];
[self.view.layer addSublayer:layer];
self.videoPreviewLayer = layer;
// NOTE(review): -startRunning is a blocking call; Apple recommends invoking it
// off the main thread. Kept on the caller's thread to preserve behavior.
[self.captureSession startRunning];
扫描效率很高(不过尚未与现有第三方库做过系统的性能对比)。
此时扫描的有效范围是整个屏幕。
需要用到AVCaptureMetadataOutput类的rectOfInterest属性。
rectOfInterest 的默认值是 CGRectMake(0, 0, 1, 1)。
这四个值不是具体的像素值,而是相对于采集画面宽高的比例(取值范围 0~1)。
并且 x 与 y、width 与 height 的位置是互换的,即应写成 CGRectMake(y, x, height, width)——这是因为 rectOfInterest 使用的是摄像头原生(横向)坐标系,与竖屏下的屏幕坐标系相差 90°。😄
// Examples — each call replaces the previous rect; only the last one you keep
// takes effect. Labels below are the on-screen region in portrait orientation.
[self.captureMetadataOutput setRectOfInterest:CGRectMake(0, 0.5, 0.5, 0.5)];// upper-left quarter of the screen
[self.captureMetadataOutput setRectOfInterest:CGRectMake(0.5, 0.5, 0.5, 0.5)];// lower-left quarter of the screen
[self.captureMetadataOutput setRectOfInterest:CGRectMake(0.5, 0, 0.5, 0.5)]; // lower-right quarter of the screen
[self.captureMetadataOutput setRectOfInterest:CGRectMake(0, 0, 0.5, 0.5)]; // upper-right quarter of the screen
// Custom pixel-derived region. NOTE(review): assumes ScreenHigh/ScreenWidth are
// CGFloat screen dimensions — if they were integers, 24/ScreenHigh would
// truncate to 0; confirm their definitions.
[self.captureMetadataOutput setRectOfInterest:CGRectMake(24/ScreenHigh, ((ScreenWidth-220)*0.5)/ScreenWidth,220/ScreenHigh,220/ScreenWidth)]; // custom scan region at specific pixel coordinates
[self.captureMetadataOutput setRectOfInterest:CGRectMake(0.25,0.25, 0.5, 0.5)]; // roughly the center of the screen
OK!到这里就完成了。
有问题欢迎回复讨论。