iOS 4: Direct Access to Camera Frame Data

Frameworks to add: CoreMedia, CoreVideo, QuartzCore, AVFoundation

The controller below captures raw frames from the camera through an AVCaptureVideoDataOutput and displays them three ways at once: a plain CALayer fed CGImages, a UIImageView fed UIImages, and a standard AVCaptureVideoPreviewLayer.
MyAVController.h:

#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>
#import <CoreGraphics/CoreGraphics.h>
#import <CoreVideo/CoreVideo.h>
#import <CoreMedia/CoreMedia.h>
 
@interface MyAVController : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate> {
    AVCaptureSession *_captureSession;
    UIImageView *_imageView;
    CALayer *_customLayer;
    AVCaptureVideoPreviewLayer *_prevLayer;
}
 
@property (nonatomic, retain) AVCaptureSession *captureSession;
@property (nonatomic, retain) UIImageView *imageView;
@property (nonatomic, retain) CALayer *customLayer;
@property (nonatomic, retain) AVCaptureVideoPreviewLayer *prevLayer;
- (void)initCapture;
 
@end
 
MyAVController.m:
 
#import "MyAVController.h"
 
@implementation MyAVController
 
@synthesize captureSession = _captureSession;
@synthesize imageView = _imageView;
@synthesize customLayer = _customLayer;
@synthesize prevLayer = _prevLayer;
 
#pragma mark -
#pragma mark Initialization
- (id)init {
    self = [super init];
    if (self) {
        self.imageView = nil;
        self.prevLayer = nil;
        self.customLayer = nil;
    }
    return self;
}
 
- (void)viewDidLoad {
    [super viewDidLoad];
    [self initCapture];
}
 
- (void)initCapture {
    // Grab the default video device (the camera) and wrap it in a capture input.
    AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput
        deviceInputWithDevice:[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]
                        error:nil];
    // The video data output delivers raw frames through the sample buffer delegate.
    AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    captureOutput.alwaysDiscardsLateVideoFrames = YES;
    //captureOutput.minFrameDuration = CMTimeMake(1, 10);

    // Deliver frames on a dedicated serial queue so the main thread stays free.
    dispatch_queue_t queue = dispatch_queue_create("cameraQueue", NULL);
    [captureOutput setSampleBufferDelegate:self queue:queue];
    dispatch_release(queue);

    // Request BGRA pixel buffers so they can be fed straight into CoreGraphics.
    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
    [captureOutput setVideoSettings:videoSettings];

    self.captureSession = [[[AVCaptureSession alloc] init] autorelease];
    [self.captureSession addInput:captureInput];
    [self.captureSession addOutput:captureOutput];
    [captureOutput release]; // the session retains the output
    [self.captureSession startRunning];

    // Path 1: a plain CALayer whose contents are set frame by frame in the delegate.
    self.customLayer = [CALayer layer];
    self.customLayer.frame = self.view.bounds;
    // Rotate 90 degrees because buffers arrive in landscape orientation.
    self.customLayer.transform = CATransform3DRotate(CATransform3DIdentity, M_PI / 2.0f, 0, 0, 1);
    self.customLayer.contentsGravity = kCAGravityResizeAspectFill;
    [self.view.layer addSublayer:self.customLayer];

    // Path 2: a small UIImageView, also updated from the delegate.
    self.imageView = [[[UIImageView alloc] init] autorelease];
    self.imageView.frame = CGRectMake(0, 0, 100, 100);
    [self.view addSubview:self.imageView];

    // Path 3: the built-in preview layer, for comparison with the two manual paths.
    self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
    self.prevLayer.frame = CGRectMake(100, 0, 100, 100);
    self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    [self.view.layer addSublayer:self.prevLayer];
}
 
#pragma mark -
#pragma mark AVCaptureSession delegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // This callback runs on the capture queue, not the main thread,
    // so it needs its own autorelease pool.
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

    // Lock the pixel buffer and read its geometry.
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // Wrap the BGRA bytes in a CoreGraphics bitmap context and snapshot a CGImage.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8,
        bytesPerRow, colorSpace,
        kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef newImage = CGBitmapContextCreateImage(newContext);
    CGContextRelease(newContext);
    CGColorSpaceRelease(colorSpace);

    // Push the CGImage into the CALayer on the main thread.
    [self.customLayer performSelectorOnMainThread:@selector(setContents:)
                                       withObject:(id)newImage
                                    waitUntilDone:YES];

    // Build a UIImage (rotated back to portrait) and hand it to the image view.
    UIImage *image = [UIImage imageWithCGImage:newImage scale:1.0
                                   orientation:UIImageOrientationRight];
    CGImageRelease(newImage);
    [self.imageView performSelectorOnMainThread:@selector(setImage:)
                                     withObject:image
                                  waitUntilDone:YES];

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    [pool drain];
}
 
#pragma mark -
#pragma mark Memory management
 
- (void)viewDidUnload {
    [super viewDidUnload];
    self.imageView = nil;
    self.customLayer = nil;
    self.prevLayer = nil;
}

- (void)dealloc {
    [_captureSession release];
    [super dealloc];
}
 
@end
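
For completeness, a minimal sketch of putting the controller on screen from an app delegate. The `MyAppDelegate` name and the window setup are assumptions for illustration, not part of the original article; any way of presenting the view controller works, since `viewDidLoad` kicks off the capture session.

#import <UIKit/UIKit.h>
#import "MyAVController.h"

// Hypothetical app delegate, shown only to demonstrate usage.
@interface MyAppDelegate : NSObject <UIApplicationDelegate>
@property (nonatomic, retain) UIWindow *window;
@end

@implementation MyAppDelegate

@synthesize window = _window;

- (BOOL)application:(UIApplication *)application
didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
    self.window = [[[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]] autorelease];
    // Make the capture controller the root; its viewDidLoad calls initCapture.
    MyAVController *controller = [[[MyAVController alloc] init] autorelease];
    self.window.rootViewController = controller;
    [self.window makeKeyAndVisible];
    return YES;
}

- (void)dealloc {
    [_window release];
    [super dealloc];
}

@end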

Original article: http://www.benjaminloulier.com/articles/ios4-and-direct-access-to-the-camera
