Code Example:
# Computer Vision for security applications
import cv2
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
class SecurityImageAnalyzer:
    """Computer-vision helpers for security/surveillance image analysis.

    Provides preprocessing, Haar-cascade face detection, frame-difference
    motion detection, statistical anomaly scoring, color-feature
    extraction, and a simple image-tampering heuristic.
    """

    def __init__(self):
        # Pre-trained Haar cascade XML files bundled with the OpenCV install.
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        self.license_plate_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_russian_plate_number.xml')

    def preprocess_security_image(self, image_path):
        """Load an image and return original/grayscale/enhanced versions.

        Returns a dict with keys 'original', 'grayscale', 'processed',
        or None when the file cannot be read.
        """
        img = cv2.imread(image_path)
        if img is None:
            return None
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Bilateral filter: reduces noise while preserving edges.
        denoised = cv2.bilateralFilter(gray, 9, 75, 75)
        # CLAHE: local contrast enhancement for dim/unevenly lit footage.
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        enhanced = clahe.apply(denoised)
        return {
            'original': img,
            'grayscale': gray,
            'processed': enhanced,
        }

    def detect_faces(self, image):
        """Detect faces in a BGR or grayscale image.

        Returns the raw ``detectMultiScale`` result: an array of
        (x, y, w, h) boxes, or an empty tuple when nothing is found.
        """
        if len(image.shape) == 3:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray = image
        return self.face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
        )

    def detect_motion(self, frame1, frame2, threshold=25):
        """Detect motion between two frames via absolute differencing.

        Returns ``(motion_areas, thresh)``: a list of (x, y, w, h, area)
        tuples for sufficiently large moving regions, plus the binary
        difference mask.
        """
        gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY) if len(frame1.shape) == 3 else frame1
        gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY) if len(frame2.shape) == 3 else frame2
        diff = cv2.absdiff(gray1, gray2)
        _, thresh = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        motion_areas = []
        for contour in contours:
            area = cv2.contourArea(contour)
            # Ignore tiny contours (sensor noise, flicker).
            if area > 500:
                x, y, w, h = cv2.boundingRect(contour)
                motion_areas.append((x, y, w, h, area))
        return motion_areas, thresh

    def analyze_image_anomalies(self, image):
        """Score simple statistical anomalies (brightness, contrast, edges).

        Returns a dict of the measured statistics, an integer
        ``anomaly_score`` (0-3) and ``is_anomalous`` (score >= 2).
        """
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image
        mean_intensity = np.mean(gray)
        std_intensity = np.std(gray)
        # Edge density as a cheap texture measure.
        edges = cv2.Canny(gray, 50, 150)
        edge_density = np.sum(edges > 0) / edges.size
        # Frequency-domain view (currently informational only — not used
        # in the score below).
        f_transform = np.fft.fft2(gray)
        f_shift = np.fft.fftshift(f_transform)
        # "+ 1" guards against log(0) at zero-magnitude frequency bins,
        # which otherwise produces -inf and a runtime warning.
        magnitude_spectrum = np.log(np.abs(f_shift) + 1)
        anomaly_score = 0
        # Unusually dark or bright overall.
        if mean_intensity < 50 or mean_intensity > 200:
            anomaly_score += 1
        # Unusually flat or harsh contrast.
        if std_intensity < 10 or std_intensity > 80:
            anomaly_score += 1
        # Unusually smooth or busy texture.
        if edge_density < 0.05 or edge_density > 0.3:
            anomaly_score += 1
        return {
            'mean_intensity': mean_intensity,
            'std_intensity': std_intensity,
            'edge_density': edge_density,
            'anomaly_score': anomaly_score,
            'is_anomalous': anomaly_score >= 2,
        }

    def extract_color_features(self, image):
        """Extract BGR histograms, k-means dominant colors, and color stats."""
        # Alternate color spaces (computed for completeness; the
        # histograms below use the BGR channels directly).
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        hist_b = cv2.calcHist([image], [0], None, [256], [0, 256])
        hist_g = cv2.calcHist([image], [1], None, [256], [0, 256])
        hist_r = cv2.calcHist([image], [2], None, [256], [0, 256])
        # Dominant colors via k-means over all pixels.
        data = np.float32(image.reshape((-1, 3)))
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
        k = 5  # number of dominant colors to extract
        _, labels, centers = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        centers = np.uint8(centers)
        return {
            'color_histograms': {
                'blue': hist_b.flatten(),
                'green': hist_g.flatten(),
                'red': hist_r.flatten(),
            },
            'dominant_colors': centers,
            'mean_color': np.mean(image, axis=(0, 1)),
            'color_variance': np.var(image, axis=(0, 1)),
        }

    def detect_tampering(self, image):
        """Heuristic tampering indicators from local variance and noise.

        Simplified error-level-style analysis: regions whose local
        variance is far above the rest of the image are flagged as
        suspicious. Returns a dict of indicator values.
        """
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image
        gray_f = gray.astype(np.float32)
        # Local mean and variance via a 5x5 box filter.
        kernel = np.ones((5, 5), np.float32) / 25
        mean_filtered = cv2.filter2D(gray_f, -1, kernel)
        variance = cv2.filter2D((gray_f - mean_filtered) ** 2, -1, kernel)
        # Flag the top 5% highest-variance pixels as suspicious.
        variance_threshold = np.percentile(variance, 95)
        suspicious_regions = variance > variance_threshold
        # Residual high-frequency noise after Gaussian smoothing.
        noise = gray_f - cv2.GaussianBlur(gray_f, (5, 5), 0)
        noise_variance = np.var(noise)
        return {
            'variance_anomalies': np.sum(suspicious_regions),
            'noise_variance': noise_variance,
            'suspicious_score': np.sum(suspicious_regions) / suspicious_regions.size,
        }
# Image augmentation for security datasets
class SecurityImageAugmentation:
    """Generate augmented variants of surveillance images for training."""

    def __init__(self):
        pass

    def augment_surveillance_image(self, image):
        """Return a list of augmented copies of *image* (original first).

        Augmentations: small rotations (camera-angle variation),
        brightness shifts (lighting variation), additive Gaussian noise
        (sensor noise), and Gaussian blur (motion/focus issues).
        """
        augmented_images = [image]
        h, w = image.shape[0], image.shape[1]
        # Small rotations simulate different camera mounting angles.
        center = (w // 2, h // 2)
        for angle in (-10, -5, 5, 10):
            rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            augmented_images.append(cv2.warpAffine(image, rotation_matrix, (w, h)))
        # Brightness shifts simulate different lighting (saturating add).
        for brightness in (-30, -15, 15, 30):
            augmented_images.append(cv2.convertScaleAbs(image, alpha=1, beta=brightness))
        # Additive zero-mean Gaussian sensor noise. The noise must stay
        # signed float until after the add: casting normal samples
        # straight to uint8 wraps negatives around to large positive
        # values (e.g. -5 -> 251), which is not noise at all.
        noise = np.random.normal(0, 10, image.shape)
        noisy_image = np.clip(image.astype(np.float32) + noise, 0, 255).astype(np.uint8)
        augmented_images.append(noisy_image)
        # Blur simulates motion blur or focus problems.
        augmented_images.append(cv2.GaussianBlur(image, (5, 5), 0))
        return augmented_images
# Example usage
analyzer = SecurityImageAnalyzer()


def process_security_image(image_path):
    """Run the full security-analysis pipeline on one image file.

    Combines preprocessing, face detection, anomaly scoring, color
    analysis, and tampering checks into a single results dict. Returns
    an error string when the image cannot be loaded.
    """
    processed = analyzer.preprocess_security_image(image_path)
    if processed is None:
        return "Error: Could not load image"

    image = processed['original']
    faces = analyzer.detect_faces(image)
    anomalies = analyzer.analyze_image_anomalies(image)
    color_features = analyzer.extract_color_features(image)
    tampering = analyzer.detect_tampering(image)

    return {
        'faces_detected': len(faces),
        # detectMultiScale yields an empty tuple (no .tolist) when
        # nothing is found, hence the length guard.
        'face_locations': faces.tolist() if len(faces) > 0 else [],
        'image_anomalies': anomalies,
        'color_analysis': {
            'dominant_colors': color_features['dominant_colors'].tolist(),
            'mean_color': color_features['mean_color'].tolist(),
        },
        'tampering_analysis': tampering,
    }

# Note: This would work with actual image files
# result = process_security_image('security_camera_frame.jpg')
Real-World Example:
Security systems use computer vision for facial recognition, license plate detection, perimeter monitoring, and analyzing surveillance footage for suspicious activities.