Mathias Westerdahl 2017-02-22 23:38:02 +01:00
parent 3441da3ec4
commit acbce5247f
8 changed files with 728 additions and 6 deletions

camera/ext.manifest Normal file (18 lines)

@@ -0,0 +1,18 @@
name: "Camera"

platforms:
    x86_64-osx:
        context:
            frameworks: ["AVFoundation", "CoreMedia"]

    x86-osx:
        context:
            frameworks: ["AVFoundation", "CoreMedia"]

    arm64-ios:
        context:
            frameworks: ["AVFoundation", "CoreMedia"]

    armv7-ios:
        context:
            frameworks: ["AVFoundation", "CoreMedia"]

camera/src/camera.cpp Normal file (183 lines)

@@ -0,0 +1,183 @@
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#define EXTENSION_NAME Camera
#define LIB_NAME "Camera"
#define MODULE_NAME "camera"
// Defold SDK
#define DLIB_LOG_DOMAIN LIB_NAME
#include <dmsdk/sdk.h>
#if defined(DM_PLATFORM_IOS) || defined(DM_PLATFORM_OSX)
#include "camera_private.h"
struct DefoldCamera
{
// The buffer that receives the pixel data
dmBuffer::HBuffer m_VideoBuffer;
// We create the buffer once, and keep a reference to it throughout
// the capture.
int m_VideoBufferLuaRef;
// Information about the currently set camera
CameraInfo m_Params;
};
DefoldCamera g_DefoldCamera;
static int StartCapture(lua_State* L)
{
DM_LUA_STACK_CHECK(L, 1);
CameraType type = (CameraType) luaL_checkint(L, 1);
int status = CameraPlatform_StartCapture(&g_DefoldCamera.m_VideoBuffer, type, g_DefoldCamera.m_Params);
lua_pushboolean(L, status > 0);
if( status == 0 )
{
dmLogError("capture failed!");
return 1;
}
// Increase ref count
dmScript::PushBuffer(L, g_DefoldCamera.m_VideoBuffer);
g_DefoldCamera.m_VideoBufferLuaRef = dmScript::Ref(L, LUA_REGISTRYINDEX);
return 1;
}
static int StopCapture(lua_State* L)
{
DM_LUA_STACK_CHECK(L, 0);
int status = CameraPlatform_StopCapture();
if( !status )
{
luaL_error(L, "Failed to stop capture. Was it started?");
}
dmScript::Unref(L, LUA_REGISTRYINDEX, g_DefoldCamera.m_VideoBufferLuaRef); // We want it destroyed by the GC
return 0;
}
static int GetInfo(lua_State* L)
{
DM_LUA_STACK_CHECK(L, 1);
lua_newtable(L);
lua_pushstring(L, "width");
lua_pushnumber(L, g_DefoldCamera.m_Params.m_Width);
lua_rawset(L, -3);
lua_pushstring(L, "height");
lua_pushnumber(L, g_DefoldCamera.m_Params.m_Height);
lua_rawset(L, -3);
lua_pushstring(L, "bytes_per_pixel");
lua_pushnumber(L, 3);
lua_rawset(L, -3);
lua_pushstring(L, "type");
lua_pushinteger(L, g_DefoldCamera.m_Params.m_Type);
lua_rawset(L, -3);
return 1;
}
static int GetFrame(lua_State* L)
{
DM_LUA_STACK_CHECK(L, 1);
lua_rawgeti(L, LUA_REGISTRYINDEX, g_DefoldCamera.m_VideoBufferLuaRef);
return 1;
}
static const luaL_reg Module_methods[] =
{
{"start_capture", StartCapture},
{"stop_capture", StopCapture},
{"get_frame", GetFrame},
{"get_info", GetInfo},
{0, 0}
};
static void LuaInit(lua_State* L)
{
int top = lua_gettop(L);
luaL_register(L, MODULE_NAME, Module_methods);
#define SETCONSTANT(name) \
lua_pushnumber(L, (lua_Number) name); \
lua_setfield(L, -2, #name);
SETCONSTANT(CAMERA_TYPE_FRONT)
SETCONSTANT(CAMERA_TYPE_BACK)
#undef SETCONSTANT
lua_pop(L, 1);
assert(top == lua_gettop(L));
}
dmExtension::Result AppInitializeCamera(dmExtension::AppParams* params)
{
return dmExtension::RESULT_OK;
}
dmExtension::Result InitializeCamera(dmExtension::Params* params)
{
LuaInit(params->m_L);
return dmExtension::RESULT_OK;
}
dmExtension::Result AppFinalizeCamera(dmExtension::AppParams* params)
{
return dmExtension::RESULT_OK;
}
dmExtension::Result FinalizeCamera(dmExtension::Params* params)
{
return dmExtension::RESULT_OK;
}
#else // unsupported platforms
static dmExtension::Result AppInitializeCamera(dmExtension::AppParams* params)
{
dmLogInfo("Registered %s (null) Extension\n", MODULE_NAME);
return dmExtension::RESULT_OK;
}
static dmExtension::Result InitializeCamera(dmExtension::Params* params)
{
return dmExtension::RESULT_OK;
}
static dmExtension::Result AppFinalizeCamera(dmExtension::AppParams* params)
{
return dmExtension::RESULT_OK;
}
static dmExtension::Result FinalizeCamera(dmExtension::Params* params)
{
return dmExtension::RESULT_OK;
}
#endif // platforms
DM_DECLARE_EXTENSION(EXTENSION_NAME, LIB_NAME, AppInitializeCamera, AppFinalizeCamera, InitializeCamera, 0, 0, FinalizeCamera)
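
The module table registered above exposes four functions plus the two CAMERA_TYPE_* constants to Lua. A minimal usage sketch (illustrative, not part of this commit):

-- `camera` is nil on platforms where only the null extension is registered
if camera ~= nil and camera.start_capture(camera.CAMERA_TYPE_FRONT) then
    local info = camera.get_info()   -- width, height, bytes_per_pixel, type
    local frame = camera.get_frame() -- buffer with one uint8 "data" stream, 3 values per pixel
    print("capturing at " .. info.width .. "x" .. info.height)
    camera.stop_capture()
end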

camera/src/camera.mm Normal file (392 lines)

@@ -0,0 +1,392 @@
#include <dmsdk/sdk.h>
#include "camera_private.h"
#if defined(DM_PLATFORM_IOS) || defined(DM_PLATFORM_OSX)
#include <AVFoundation/AVFoundation.h>
// Some good reads on capturing camera/video for iOS/macOS
// http://easynativeextensions.com/camera-tutorial-part-4-connect-to-the-camera-in-objective-c/
// https://developer.apple.com/library/content/qa/qa1702/_index.html
// http://stackoverflow.com/questions/19422322/method-to-find-devices-camera-resolution-ios
// http://stackoverflow.com/a/32047525
@interface CameraCaptureDelegate : NSObject <AVCaptureVideoDataOutputSampleBufferDelegate>
{
@private AVCaptureSession* m_captureSession;
@private AVCaptureDevice* m_camera;
@private AVCaptureDeviceInput* m_cameraInput;
@private AVCaptureVideoDataOutput* m_videoOutput;
@private dispatch_queue_t m_Queue;
@public CMVideoDimensions m_Size;
}
@end
struct IOSCamera
{
CameraCaptureDelegate* m_Delegate;
dmBuffer::HBuffer m_VideoBuffer;
// TODO: Support audio buffers
IOSCamera() : m_Delegate(0)
{
}
};
IOSCamera g_Camera;
@implementation CameraCaptureDelegate
- ( id ) init
{
self = [ super init ];
m_captureSession = NULL;
m_camera = NULL;
m_cameraInput = NULL;
m_videoOutput = NULL;
return self;
}
- ( void ) onVideoError: ( NSString * ) error
{
NSLog(@"%@",error);
}
- ( void ) onVideoStart: ( NSNotification * ) note
{
// perhaps add callback
}
- ( void ) onVideoStop: ( NSNotification * ) note
{
// perhaps add callback
}
// Delegate routine that is called when a sample buffer was written
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection
{
if( captureOutput == m_videoOutput )
{
uint8_t* data = 0;
uint32_t datasize = 0;
dmBuffer::GetBytes(g_Camera.m_VideoBuffer, (void**)&data, &datasize);
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
if( width != g_Camera.m_Delegate->m_Size.width || height != g_Camera.m_Delegate->m_Size.height )
{
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
return;
}
uint8_t* pixels = (uint8_t*)CVPixelBufferGetBaseAddress(imageBuffer);
for( int y = 0; y < height; ++y )
{
for( int x = 0; x < width; ++x )
{
// RGB <- BGR(A)
#if defined(DM_PLATFORM_IOS)
// Flip X
data[y*width*3 + x*3 + 2] = pixels[y * bytesPerRow + bytesPerRow - (x+1) * 4 + 0];
data[y*width*3 + x*3 + 1] = pixels[y * bytesPerRow + bytesPerRow - (x+1) * 4 + 1];
data[y*width*3 + x*3 + 0] = pixels[y * bytesPerRow + bytesPerRow - (x+1) * 4 + 2];
#else
// Flip X + Y
data[y*width*3 + x*3 + 2] = pixels[(height - y - 1) * bytesPerRow + bytesPerRow - (x+1) * 4 + 0];
data[y*width*3 + x*3 + 1] = pixels[(height - y - 1) * bytesPerRow + bytesPerRow - (x+1) * 4 + 1];
data[y*width*3 + x*3 + 0] = pixels[(height - y - 1) * bytesPerRow + bytesPerRow - (x+1) * 4 + 2];
#endif
}
}
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
dmBuffer::ValidateBuffer(g_Camera.m_VideoBuffer);
}
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection
{
NSLog(@"DROPPING FRAME!!!");
}
// http://easynativeextensions.com/camera-tutorial-part-4-connect-to-the-camera-in-objective-c
- ( BOOL ) findCamera: (AVCaptureDevicePosition) cameraPosition
{
// 0. Make sure we initialize our camera pointer:
m_camera = NULL;
// 1. Get a list of available devices:
// specifying AVMediaTypeVideo will ensure we only get a list of cameras, no microphones
NSArray * devices = [ AVCaptureDevice devicesWithMediaType: AVMediaTypeVideo ];
// 2. Iterate through the device array and if a device is a camera, check if it's the one we want:
for ( AVCaptureDevice * device in devices )
{
if ( cameraPosition == [ device position ] )
{
m_camera = device;
}
}
// 3. Set a frame rate for the camera:
if ( NULL != m_camera )
{
#if defined(DM_PLATFORM_IOS)
// We first need to lock the camera, so no one else can mess with its configuration:
if ( [ m_camera lockForConfiguration: NULL ] )
{
// Set a minimum frame rate of 10 frames per second
[ m_camera setActiveVideoMinFrameDuration: CMTimeMake( 1, 10 ) ];
// and a maximum of 30 frames per second
[ m_camera setActiveVideoMaxFrameDuration: CMTimeMake( 1, 30 ) ];
[ m_camera unlockForConfiguration ];
}
#endif
}
// 4. If we've found the camera we want, return true
return ( NULL != m_camera );
}
- ( BOOL ) attachCameraToCaptureSession
{
// 0. Assume we've found the camera and set up the session first:
assert( NULL != m_camera );
assert( NULL != m_captureSession );
// 1. Initialize the camera input
m_cameraInput = NULL;
// 2. Request a camera input from the camera
NSError * error = NULL;
m_cameraInput = [ AVCaptureDeviceInput deviceInputWithDevice: m_camera error: &error ];
// 2.1. Check if we've got any errors
if ( NULL != error )
{
// TODO: report the error to the Lua layer
return false;
}
// 3. We've got the input from the camera, now attach it to the capture session:
if ( [ m_captureSession canAddInput: m_cameraInput ] )
{
[ m_captureSession addInput: m_cameraInput ];
}
else
{
// TODO: report the error to the Lua layer
return false;
}
// 4. Done, the attaching was successful, return true to signal that
return true;
}
- ( void ) setupVideoOutput
{
// 1. Create the video data output
m_videoOutput = [ [ AVCaptureVideoDataOutput alloc ] init ];
// 2. Create a queue for capturing video frames
dispatch_queue_t captureQueue = dispatch_queue_create( "captureQueue", DISPATCH_QUEUE_SERIAL );
// 3. Use the AVCaptureVideoDataOutputSampleBufferDelegate capabilities of CameraDelegate:
[ m_videoOutput setSampleBufferDelegate: self queue: captureQueue ];
// 4. Set up the video output
// 4.1. Do we care about missing frames?
m_videoOutput.alwaysDiscardsLateVideoFrames = NO;
// 4.2. Request BGRA frames; the capture delegate converts them to the RGB layout the Defold buffer expects
NSNumber * framePixelFormat = [ NSNumber numberWithInt: kCVPixelFormatType_32BGRA ];
m_videoOutput.videoSettings = [ NSDictionary dictionaryWithObject: framePixelFormat
forKey: ( id ) kCVPixelBufferPixelFormatTypeKey ];
// 5. Add the video data output to the capture session
[ m_captureSession addOutput: m_videoOutput ];
}
- ( BOOL ) startCamera: (AVCaptureDevicePosition) cameraPosition
{
// 1. Find the requested camera
if ( ![ self findCamera: cameraPosition ] )
{
return false;
}
//2. Make sure we have a capture session
if ( NULL == m_captureSession )
{
m_captureSession = [ [ AVCaptureSession alloc ] init ];
}
// 3. Choose a preset for the session.
// Optional TODO: You can parameterize this and set it from Lua.
//NSString * cameraResolutionPreset = AVCaptureSessionPreset640x480;
NSString * cameraResolutionPreset = AVCaptureSessionPreset1280x720;
// 4. Check if the preset is supported on the device by asking the capture session:
if ( ![ m_captureSession canSetSessionPreset: cameraResolutionPreset ] )
{
// Optional TODO: Report the error to the Lua layer
return false;
}
// 4.1. The preset is OK, now set up the capture session to use it
[ m_captureSession setSessionPreset: cameraResolutionPreset ];
// 5. Plug the camera and the capture session together
[ self attachCameraToCaptureSession ];
// 6. Add the video output
[ self setupVideoOutput ];
// 7. Set up a callback, so we are notified when the camera actually starts
[ [ NSNotificationCenter defaultCenter ] addObserver: self
selector: @selector( onVideoStart: )
name: AVCaptureSessionDidStartRunningNotification
object: m_captureSession ];
// 8. 3, 2, 1, 0... Start!
[ m_captureSession startRunning ];
// Note: Returning true from this function only means that setting up went OK.
// It doesn't mean that the camera has started yet.
// We get notified about the camera having started in the videoCameraStarted() callback.
return true;
}
- ( BOOL ) stopCamera
{
BOOL isRunning = [m_captureSession isRunning];
if( isRunning )
{
[m_captureSession stopRunning ];
}
return isRunning;
}
@end
// http://stackoverflow.com/a/32047525
CMVideoDimensions _CameraPlatform_GetSize(AVCaptureDevicePosition cameraPosition)
{
CMVideoDimensions max_resolution;
max_resolution.width = 0;
max_resolution.height = 0;
AVCaptureDevice* captureDevice = nil;
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices) {
/*
const char* position = "unspecified";
if( device.position == AVCaptureDevicePositionBack )
position = "back";
else if (device.position == AVCaptureDevicePositionFront )
position = "front";
NSString* localizedName = [NSString stringWithFormat:@"%@, position: %s", device.localizedName, position];
NSLog(@"%@", localizedName);
*/
if ([device position] == cameraPosition) {
captureDevice = device;
break;
}
}
if (captureDevice == nil) {
return max_resolution;
}
NSArray* availFormats=captureDevice.formats;
for (AVCaptureDeviceFormat* format in availFormats) {
#if defined(DM_PLATFORM_IOS)
CMVideoDimensions resolution = format.highResolutionStillImageDimensions;
#else
CMVideoDimensions resolution = CMVideoFormatDescriptionGetDimensions((CMVideoFormatDescriptionRef)[format formatDescription]);
#endif
int w = resolution.width;
int h = resolution.height;
if ((w * h) > (max_resolution.width * max_resolution.height)) {
max_resolution.width = w;
max_resolution.height = h;
}
}
return max_resolution;
}
int CameraPlatform_StartCapture(dmBuffer::HBuffer* buffer, CameraType type, CameraInfo& outparams)
{
if(g_Camera.m_Delegate == 0)
{
g_Camera.m_Delegate = [[CameraCaptureDelegate alloc] init];
}
AVCaptureDevicePosition cameraposition = AVCaptureDevicePositionUnspecified;
#if defined(DM_PLATFORM_IOS)
if( type == CAMERA_TYPE_BACK )
cameraposition = AVCaptureDevicePositionBack;
else if( type == CAMERA_TYPE_FRONT )
cameraposition = AVCaptureDevicePositionFront;
#endif
CMVideoDimensions dimensions = _CameraPlatform_GetSize(cameraposition);
g_Camera.m_Delegate->m_Size = dimensions;
outparams.m_Width = (uint32_t)dimensions.width;
outparams.m_Height = (uint32_t)dimensions.height;
outparams.m_Type = type; // so camera.get_info() reports the active camera
uint32_t size = outparams.m_Width * outparams.m_Height;
dmBuffer::StreamDeclaration streams_decl[] = {
{dmHashString64("data"), dmBuffer::VALUE_TYPE_UINT8, 3}
};
dmBuffer::Allocate(size, streams_decl, 1, buffer);
g_Camera.m_VideoBuffer = *buffer;
BOOL started = [g_Camera.m_Delegate startCamera: cameraposition];
return started ? 1 : 0;
}
int CameraPlatform_StopCapture()
{
if(g_Camera.m_Delegate != 0)
{
[g_Camera.m_Delegate stopCamera];
[g_Camera.m_Delegate release];
g_Camera.m_Delegate = 0; // allow a later start_capture to recreate the delegate
}
return 1;
}
#endif // DM_PLATFORM_IOS || DM_PLATFORM_OSX
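
For reference, the buffer allocated in CameraPlatform_StartCapture holds a single interleaved "data" stream of three uint8 values per pixel. A hypothetical helper (not part of this commit) reading it back with the same dmBuffer::GetBytes call the capture delegate uses:

// Hypothetical sketch: average red value of a captured frame, assuming the
// packed RGB layout allocated in CameraPlatform_StartCapture.
static uint8_t AverageRed(dmBuffer::HBuffer buffer, uint32_t width, uint32_t height)
{
    uint8_t* data = 0;
    uint32_t datasize = 0;
    if (dmBuffer::GetBytes(buffer, (void**)&data, &datasize) != dmBuffer::RESULT_OK)
        return 0;
    uint64_t sum = 0;
    for (uint32_t i = 0; i < width * height; ++i)
        sum += data[i*3 + 0]; // stream order is R, G, B
    return (uint8_t)(sum / (uint64_t)(width * height));
}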

camera/src/camera_private.h Normal file (20 lines)

@@ -0,0 +1,20 @@
#pragma once
#include <dmsdk/sdk.h>
enum CameraType
{
CAMERA_TYPE_FRONT, // Selfie
CAMERA_TYPE_BACK
};
struct CameraInfo
{
uint32_t m_Width;
uint32_t m_Height;
CameraType m_Type;
};
extern int CameraPlatform_StartCapture(dmBuffer::HBuffer* buffer, CameraType type, CameraInfo& outparams);
extern int CameraPlatform_StopCapture();
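
These two declarations are the whole platform contract: porting the extension to a new platform means implementing them alongside camera.mm. A hypothetical do-nothing backend (file name assumed) would look like:

// hypothetical camera_null.cpp - backend for platforms without camera support
#include "camera_private.h"

int CameraPlatform_StartCapture(dmBuffer::HBuffer* buffer, CameraType type, CameraInfo& outparams)
{
    (void)buffer; (void)type; (void)outparams;
    return 0; // 0 signals failure; the Lua binding then returns false
}

int CameraPlatform_StopCapture()
{
    return 1;
}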

game.project

@@ -1,5 +1,5 @@
[project]
-title = My project
+title = Camera
version = 0.1

[bootstrap]
@@ -9,8 +9,8 @@ main_collection = /main/main.collectionc
game_binding = /input/game.input_bindingc

[display]
-width = 1280
+width = 600
-height = 720
+height = 800

[physics]
scale = 0.02
@@ -18,3 +18,12 @@ scale = 0.02
[script]
shared_state = 1
+
+[ios]
+bundle_identifier = com.defold.camera
+
+[android]
+package = com.defold.camera
+
+[osx]
+bundle_identifier = com.defold.camera

main/camera.atlas Normal file (6 lines)

@@ -0,0 +1,6 @@
@ -0,0 +1,6 @@
images {
image: "/main/images/logo.png"
}
margin: 0
extrude_borders: 0
inner_padding: 0

main/main.collection

@@ -1,5 +1,60 @@
name: "main"
scale_along_z: 0
embedded_instances {
id: "camera"
data: "components {\n"
" id: \"script\"\n"
" component: \"/main/main.script\"\n"
" position {\n"
" x: 0.0\n"
" y: 0.0\n"
" z: 0.0\n"
" }\n"
" rotation {\n"
" x: 0.0\n"
" y: 0.0\n"
" z: 0.0\n"
" w: 1.0\n"
" }\n"
"}\n"
"embedded_components {\n"
" id: \"sprite\"\n"
" type: \"sprite\"\n"
" data: \"tile_set: \\\"/main/camera.atlas\\\"\\n"
"default_animation: \\\"logo\\\"\\n"
"material: \\\"/builtins/materials/sprite.material\\\"\\n"
"blend_mode: BLEND_MODE_ALPHA\\n"
"\"\n"
" position {\n"
" x: 0.0\n"
" y: 0.0\n"
" z: 0.0\n"
" }\n"
" rotation {\n"
" x: 0.0\n"
" y: 0.0\n"
" z: 0.0\n"
" w: 1.0\n"
" }\n"
"}\n"
""
position {
x: 300.0
y: 400.0
z: 0.5
}
rotation {
x: 0.0
y: 0.0
z: 0.0
w: 1.0
}
scale3 {
x: 1.0
y: 1.0
z: 1.0
}
}
embedded_instances {
id: "logo"
data: "embedded_components {\n"
@@ -24,9 +79,9 @@ embedded_instances {
"}\n"
""
position {
-x: 640.0
+x: 95.39966
-y: 340.0
+y: 89.54209
-z: 0.0
+z: 1.0
}
rotation {
x: 0.0

main/main.script Normal file (39 lines)

@@ -0,0 +1,39 @@
function init(self)
local logosize = 128
local screen_width = sys.get_config("display.width", 600)
local screen_height = sys.get_config("display.height", 800)
local scale_width = screen_width / logosize
local scale_height = screen_height / logosize
go.set("#sprite", "scale", vmath.vector3(scale_width, scale_height, 1) )
if camera ~= nil and camera.start_capture(camera.CAMERA_TYPE_FRONT) then
self.cameraframe = camera.get_frame()
self.camerainfo = camera.get_info()
self.cameratextureheader = {width=self.camerainfo.width,
height=self.camerainfo.height,
type=resource.TEXTURE_TYPE_2D,
format=resource.TEXTURE_FORMAT_RGB,
num_mip_maps=1 }
else
print("could not start camera capture")
end
end
function final(self)
if self.cameraframe ~= nil then
camera.stop_capture()
end
end
function update(self, dt)
if self.cameraframe then
local pathmodelcamera = go.get("#sprite", "texture0")
resource.set_texture(pathmodelcamera, self.cameratextureheader, self.cameraframe)
end
end
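
A natural next step (illustrative only, not in this commit) is toggling between the front and back cameras on input, using only the API that camera.cpp registers. This assumes a "touch" action exists in the project's input bindings and that the script has acquired input focus:

function on_input(self, action_id, action)
    if action_id == hash("touch") and action.released and self.cameraframe then
        camera.stop_capture()
        -- pick the other camera type (get_info().type reports the active one)
        local other = self.camerainfo.type == camera.CAMERA_TYPE_FRONT
            and camera.CAMERA_TYPE_BACK or camera.CAMERA_TYPE_FRONT
        if camera.start_capture(other) then
            self.cameraframe = camera.get_frame()
            self.camerainfo = camera.get_info()
            -- resolutions may differ between cameras; refresh the texture header
            self.cameratextureheader.width = self.camerainfo.width
            self.cameratextureheader.height = self.camerainfo.height
        else
            self.cameraframe = nil
        end
    end
end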