Liveness Verification API
Welcome to the LumiID integration guide. LumiID provides sovereign digital identity infrastructure designed to detect real human presence and reject spoofing attempts such as printed photos, replayed videos, or masks.
Authentication
All requests require a Bearer token in the Authorization header. Obtain your API key from the LumiID Dashboard; the request sketch in Step 1 below shows the header in use.
Authorization: Bearer <your_api_key>
Step 1: Generate a Challenge
Call GET /v1/liveness/challenge/ to initialize a session and retrieve the randomized movement instructions.
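A minimal Swift sketch of this call follows. The host mirrors the placeholder used in the native example later in this guide and YOUR_API_TOKEN stands in for your real key, so adjust both for your environment; the body shape it parses matches the "Successful Response" example below.

import Foundation

// Sketch only: substitute your LumiID base URL and API key.
let challengeURL = URL(string: "https://api.lumiid.test:8000/v1/liveness/challenge/")!
var request = URLRequest(url: challengeURL)
request.httpMethod = "GET"
request.addValue("Bearer YOUR_API_TOKEN", forHTTPHeaderField: "Authorization")

URLSession.shared.dataTask(with: request) { data, _, error in
    guard let data = data, error == nil else { return }
    if let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
       let payload = json["data"] as? [String: Any] {
        let sessionID = payload["session_id"] as? String ?? ""
        let challenge = payload["challenge"] as? [String] ?? []
        print("session_id: \(sessionID), challenge: \(challenge)")
    }
}.resume()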
Successful Response
{
  "success": true,
  "code": "CHALLENGE_GENERATED",
  "message": "Challenge generated successfully.",
  "summary": {
    "verified": false,
    "verification_type": "LIVENESS_CHALLENGE"
  },
  "data": {
    "session_id": "cc0d9010-1574-43e7-9a9f-ae823ae18b57",
    "challenge": ["center", "up", "right"]
  },
  "meta": {
    "request_id": "req_dea1dcc6044a",
    "timestamp": "2026-03-18T03:22:31.967258+00:00",
    "api_version": "1.0"
  }
}
- session_id: A short-lived session identifier that expires after 5 minutes.
- challenge: Ordered list of movements. Your UI must instruct the user to perform them in this exact order (see the parsing sketch below).
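If you prefer typed models over raw dictionaries, a minimal Codable sketch for this response could look as follows; the struct names are illustrative, and only the JSON keys come from the documented response.

import Foundation

// Illustrative models for the challenge response shown above.
struct ChallengeData: Decodable {
    let session_id: String
    let challenge: [String]
}

struct ChallengeResponse: Decodable {
    let success: Bool
    let data: ChallengeData
}

// Usage:
// let response = try JSONDecoder().decode(ChallengeResponse.self, from: body)
// let steps = response.data.challenge   // present to the user in this exact order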
Step 2: Verify Liveness
POST the captured frames as Base64-encoded strings to /v1/face-liveness/ for biometric analysis.
| Field | Type | Description |
|---|---|---|
| session_id | String | Received from Challenge endpoint. |
| frames | Object | Map of each challenge movement to its Base64-encoded image. |
| threshold | Float | Optional acceptance threshold. Default: 0.75. |
Example Request Body
{
  "session_id": "sess_987654321",
  "frames": {
    "center": "data:image/jpeg;base64,...",
    "up": "data:image/jpeg;base64,...",
    "right": "data:image/jpeg;base64,..."
  }
}
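The optional threshold field may be included at the top level of this body to override the default of 0.75.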
Understanding the Response
The engine provides granular metadata for audit logs and identity assurance; the yaw and pitch values in metadata report the head pose estimated for each captured movement.
{
  "success": true,
  "summary": {
    "verified": true,
    "verification_type": "LIVENESS_CHECK"
  },
  "data": {
    "status": "VERIFIED",
    "challenge_performed": ["center", "down", "right"],
    "metadata": {
      "center": {"yaw": -0.58, "pitch": 1.90},
      "down": {"yaw": -0.79, "pitch": 7.34},
      "right": {"yaw": 27.23, "pitch": 4.92}
    }
  }
}
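On the client, checking the outcome might look like the sketch below; the model names are our own and only the keys follow the response above. The final authorization decision should still happen on your backend (see the security note at the end of this guide).

import Foundation

// Illustrative models for the verification response shown above.
struct VerificationSummary: Decodable {
    let verified: Bool
    let verification_type: String
}

struct VerificationResponse: Decodable {
    let success: Bool
    let summary: VerificationSummary
}

func handleVerification(body: Data) {
    guard let response = try? JSONDecoder().decode(VerificationResponse.self, from: body) else {
        print("Could not decode verification response")
        return
    }
    if response.success && response.summary.verified {
        print("Liveness verified")   // update UI only; re-check server-side before approving
    } else {
        print("Liveness check failed")
    }
}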
Mobile Implementation (Web-to-Native)
For native apps, we recommend the LumiID WebView Bridge, which lets the JS SDK signal success back to your Swift or Kotlin code. Fully native reference implementations for iOS (Swift) and Android (Kotlin) follow further below.
<script src="https://lumiid.com/static/v1/sdk/lumiid.js"></script>

const lumi = new LumiID({
  apiKey: "YOUR_API_KEY",
  containerId: "lumiid-container",
  baseUrl: "https://api.lumiid.com",
  onSuccess: (data) => {
    console.log("Verified!", data);
    alert("Identity Confirmed");
  },
  onFailure: (err) => {
    console.error("Verification failed", err);
  }
});
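The exact bridge wiring depends on how you embed the SDK. One common pattern, sketched below in Swift, is to have onSuccess post a message to a WKScriptMessageHandler; the handler name lumiidBridge, the hosting URL, and the postMessage call are illustrative assumptions, not part of the LumiID SDK.

import UIKit
import WebKit

// Hypothetical bridge: assumes your onSuccess callback forwards its result with
// window.webkit.messageHandlers.lumiidBridge.postMessage(JSON.stringify(data));
class LumiIDWebBridgeViewController: UIViewController, WKScriptMessageHandler {
    private var webView: WKWebView!

    override func viewDidLoad() {
        super.viewDidLoad()
        let config = WKWebViewConfiguration()
        config.userContentController.add(self, name: "lumiidBridge")
        webView = WKWebView(frame: view.bounds, configuration: config)
        view.addSubview(webView)
        // Load the page that embeds the LumiID JS snippet shown above.
        webView.load(URLRequest(url: URL(string: "https://your-app.example/lumiid.html")!))
    }

    func userContentController(_ userContentController: WKUserContentController,
                               didReceive message: WKScriptMessage) {
        guard message.name == "lumiidBridge" else { return }
        // message.body carries the payload passed to postMessage by onSuccess.
        print("Verification result from web layer:", message.body)
    }
}

Alternatively, the fully native iOS implementation below drives the camera and the LumiID API directly, without a WebView.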
import UIKit
import AVFoundation
import Vision

class LumiIDNativeScannerVC: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    // MARK: - UI Elements
    private let previewView = UIView()
    private let instructionLabel = UILabel()
    private let captureButton = UIButton()
    private let progressView = UIProgressView(progressViewStyle: .bar)

    // MARK: - Properties
    private var captureSession: AVCaptureSession?
    private var videoOutput = AVCaptureVideoDataOutput()
    private var currentChallenge: [String] = []
    private var capturedFrames: [String: String] = [:]
    private var stepIndex = 0
    private var sessionID: String?
    private var isProcessing = false
    private var lastFrameAsBase64: String?
    override func viewDidLoad() {
        super.viewDidLoad()
        setupUI()
        checkPermissions()
    }

    // MARK: - UI & Permissions Setup
    private func setupUI() {
        view.backgroundColor = .black

        // Setup Instruction Label
        instructionLabel.translatesAutoresizingMaskIntoConstraints = false
        instructionLabel.textColor = .white
        instructionLabel.textAlignment = .center
        instructionLabel.font = .systemFont(ofSize: 20, weight: .bold)
        instructionLabel.numberOfLines = 0
        instructionLabel.text = "Initializing LumiID..."
        view.addSubview(instructionLabel)

        // Setup Capture Button
        captureButton.translatesAutoresizingMaskIntoConstraints = false
        captureButton.backgroundColor = .white
        captureButton.layer.cornerRadius = 35
        captureButton.setTitle("CAPTURE", for: .normal)
        captureButton.setTitleColor(.black, for: .normal)
        captureButton.titleLabel?.font = .systemFont(ofSize: 12, weight: .black)
        captureButton.addTarget(self, action: #selector(handleCaptureTap), for: .touchUpInside)
        view.addSubview(captureButton)

        // Constraints
        NSLayoutConstraint.activate([
            instructionLabel.topAnchor.constraint(equalTo: view.safeAreaLayoutGuide.topAnchor, constant: 40),
            instructionLabel.leadingAnchor.constraint(equalTo: view.leadingAnchor, constant: 20),
            instructionLabel.trailingAnchor.constraint(equalTo: view.trailingAnchor, constant: -20),
            captureButton.bottomAnchor.constraint(equalTo: view.safeAreaLayoutGuide.bottomAnchor, constant: -50),
            captureButton.centerXAnchor.constraint(equalTo: view.centerXAnchor),
            captureButton.widthAnchor.constraint(equalToConstant: 70),
            captureButton.heightAnchor.constraint(equalToConstant: 70)
        ])
    }
    private func checkPermissions() {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized:
            startLivenessSession()
        case .notDetermined:
            AVCaptureDevice.requestAccess(for: .video) { granted in
                if granted { DispatchQueue.main.async { self.startLivenessSession() } }
            }
        default:
            instructionLabel.text = "Camera access denied. Please enable in Settings."
        }
    }
    // MARK: - Step 1: Initialize Session
    private func startLivenessSession() {
        // CLIENT NOTE: Replace with actual API call to GET /v1/liveness/challenge/
        self.sessionID = "sess_987654321"
        self.currentChallenge = ["center", "up", "right"]
        self.stepIndex = 0

        DispatchQueue.main.async {
            self.updateInstruction()
            self.setupCamera()
        }
    }

    private func updateInstruction() {
        guard stepIndex < currentChallenge.count else {
            instructionLabel.text = "Verifying Identity..."
            submitToLumiID()
            return
        }
        let action = currentChallenge[stepIndex].uppercased()
        instructionLabel.text = "STEP \(stepIndex + 1)\nLook \(action)"
    }
    // MARK: - Step 2: Camera Engine
    private func setupCamera() {
        captureSession = AVCaptureSession()
        captureSession?.sessionPreset = .vga640x480

        guard let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front),
              let input = try? AVCaptureDeviceInput(device: device) else { return }

        if captureSession!.canAddInput(input) { captureSession!.addInput(input) }

        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "lumiid.queue"))
        if captureSession!.canAddOutput(videoOutput) { captureSession!.addOutput(videoOutput) }

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.insertSublayer(previewLayer, at: 0)

        DispatchQueue.global(qos: .userInitiated).async {
            self.captureSession?.startRunning()
        }
    }
    // MARK: - Step 3: Capture Handler
    @objc private func handleCaptureTap() {
        guard !isProcessing, stepIndex < currentChallenge.count, let frame = lastFrameAsBase64 else { return }
        let currentStep = currentChallenge[stepIndex]
        capturedFrames[currentStep] = frame
        stepIndex += 1
        updateInstruction()
    }
    // MARK: - Step 4: API Submission
    private func submitToLumiID() {
        isProcessing = true
        captureSession?.stopRunning()

        let payload: [String: Any] = [
            "session_id": sessionID ?? "",
            "frames": capturedFrames
        ]

        // CLIENT NOTE: Replace with your actual LumiID URL and API Token
        guard let url = URL(string: "https://api.lumiid.test:8000/v1/face-liveness/") else { return }
        var request = URLRequest(url: url)
        request.httpMethod = "POST"
        request.addValue("application/json", forHTTPHeaderField: "Content-Type")
        request.addValue("Bearer YOUR_API_TOKEN", forHTTPHeaderField: "Authorization")
        request.httpBody = try? JSONSerialization.data(withJSONObject: payload)

        URLSession.shared.dataTask(with: request) { data, _, _ in
            // Check summary.verified in the response before treating the flow as successful.
            var verified = false
            if let data = data,
               let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
               let summary = json["summary"] as? [String: Any] {
                verified = summary["verified"] as? Bool ?? false
            }
            DispatchQueue.main.async {
                self.instructionLabel.text = verified ? "Identity Verified" : "Verification Failed"
                // Handle navigation or success alerts here
                self.dismiss(animated: true)
            }
        }.resume()
    }
    // MARK: - Delegate: Frame Processing
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
        let context = CIContext()
        if let cgImage = context.createCGImage(ciImage, from: ciImage.extent) {
            // .leftMirrored is standard for the front-facing camera
            let uiImage = UIImage(cgImage: cgImage, scale: 1.0, orientation: .leftMirrored)
            if let imageData = uiImage.jpegData(compressionQuality: 0.6) {
                self.lastFrameAsBase64 = "data:image/jpeg;base64,\(imageData.base64EncodedString())"
            }
        }
    }
}
Android Manifest Permissions
<uses-permission android:name="android.permission.CAMERA" />
<uses-permission android:name="android.hardware.camera" />
<uses-permission android:name="android.hardware.camera.front" />
import android.graphics.Bitmap
import android.graphics.Matrix
import android.os.Bundle
import android.util.Base64
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.camera.view.PreviewView
import androidx.core.content.ContextCompat
import java.io.ByteArrayOutputStream
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class LumiIDNativeScannerActivity : AppCompatActivity() {

    private lateinit var cameraExecutor: ExecutorService
    private var imageAnalyzer: ImageAnalysis? = null

    // CLIENT NOTE: Replace with the values returned by GET /v1/liveness/challenge/
    private var currentChallenge = listOf("center", "up", "right")
    private var stepIndex = 0
    private val capturedFrames = mutableMapOf<String, String>()
    private var lastFrameAsBase64: String? = null

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_lumi_id_scanner)
        cameraExecutor = Executors.newSingleThreadExecutor()
        // CLIENT NOTE: Check/request the CAMERA runtime permission before starting the camera.
        startCamera()
    }
    private fun startCamera() {
        val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
        cameraProviderFuture.addListener({
            val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()

            // Preview setup (a PreviewView with id "viewFinder" is assumed in activity_lumi_id_scanner)
            val viewFinder = findViewById<PreviewView>(R.id.viewFinder)
            val preview = Preview.Builder().build().also {
                it.setSurfaceProvider(viewFinder.surfaceProvider)
            }

            // Frame Analysis (VGA 640x480 for API performance)
            imageAnalyzer = ImageAnalysis.Builder()
                .setTargetResolution(android.util.Size(640, 480))
                .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                .build()
                .also {
                    it.setAnalyzer(cameraExecutor) { imageProxy ->
                        processImageProxy(imageProxy)
                    }
                }

            val cameraSelector = CameraSelector.DEFAULT_FRONT_CAMERA
            try {
                cameraProvider.unbindAll()
                cameraProvider.bindToLifecycle(this, cameraSelector, preview, imageAnalyzer)
            } catch (exc: Exception) {
                // Handle errors (e.g. log and inform the user)
            }
        }, ContextCompat.getMainExecutor(this))
    }
    private fun processImageProxy(imageProxy: ImageProxy) {
        // Convert the latest frame to an upright JPEG and keep it as a Base64 data URI.
        // ImageProxy.toBitmap() requires CameraX 1.3+; on older versions convert the YUV planes manually.
        val bitmap = imageProxy.toBitmap()
        val matrix = Matrix().apply { postRotate(imageProxy.imageInfo.rotationDegrees.toFloat()) }
        val upright = Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, matrix, true)
        val stream = ByteArrayOutputStream()
        upright.compress(Bitmap.CompressFormat.JPEG, 60, stream)
        lastFrameAsBase64 = "data:image/jpeg;base64," +
            Base64.encodeToString(stream.toByteArray(), Base64.NO_WRAP)
        imageProxy.close()
    }
    private fun handleCapture() {
        if (stepIndex < currentChallenge.size) {
            val frame = lastFrameAsBase64 ?: return   // No frame captured yet
            capturedFrames[currentChallenge[stepIndex]] = frame
            stepIndex++
            updateUI()
        } else {
            submitToLumiID()
        }
    }

    private fun updateUI() {
        // Update the on-screen instruction for currentChallenge[stepIndex],
        // or show "Verifying..." once all steps have been captured.
    }

    private fun submitToLumiID() {
        // Use Retrofit or OkHttp to POST to:
        // https://api.lumiid.test:8000/v1/face-liveness/
        // Header:  Authorization: Bearer <your_api_key>
        // Payload: { "session_id": "...", "frames": capturedFrames }
    }
    override fun onDestroy() {
        super.onDestroy()
        cameraExecutor.shutdown()
    }
}
Integration Best Practices
Image Quality
Ensure the user is in a well-lit environment. Low light increases sensor noise, which can cause genuine users to be flagged as spoofing attempts (false positives).
Optimization
To reduce upload size and network latency, particularly on mobile connections in the African market, resize images to a maximum width of 640 px before Base64 conversion, as sketched below.
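A minimal UIKit helper along these lines can perform the downscaling; the function name and default values are our own, not part of the SDK.

import UIKit

// Sketch: cap width at 640 px before JPEG/Base64 encoding.
func resizedForUpload(_ image: UIImage, maxWidth: CGFloat = 640) -> UIImage {
    guard image.size.width > maxWidth else { return image }
    let scale = maxWidth / image.size.width
    let targetSize = CGSize(width: maxWidth, height: image.size.height * scale)
    let renderer = UIGraphicsImageRenderer(size: targetSize)
    return renderer.image { _ in
        image.draw(in: CGRect(origin: .zero, size: targetSize))
    }
}

// Usage (hypothetical helper; apply before building the "frames" payload):
// let frameBase64 = resizedForUpload(capturedImage).jpegData(compressionQuality: 0.6)?.base64EncodedString()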
Security Note: Always re-verify the result (using the request_id) on your backend. Never trust a client-side verified: true flag for final transaction approval.