Annabelle Face Filter in Python
Hello Coder! Welcome to CodeWithRandom Blog.
Today we’re going to make something super cool — a real-time Annabelle-like filter using Python and OpenCV. Whether it’s glasses, a moustache, or a funny hat, we’ll overlay it live on your face using your webcam. You must have seen the horror movie ‘Annabelle’. Today we will learn how I created an Annabelle face filter using Python.

If you love Python and want to dive into computer vision in a fun way, this project is for you!
What Will You Build?
We’ll build a real-time filter system that:
- Opens your webcam feed
- Detects your face
- Places a PNG filter (with transparency) right on your face
- Runs live and closes with a keystroke
We will replicate a basic version of what Snapchat filters do — using just Python and OpenCV.
Resource
Before we jump into the code, make sure you’ve installed:
- Python 3.x
- OpenCV
Install OpenCV
Run the following command in your terminal:
pip install opencv-python
pip install opencv-python-headless
Also, you need a transparent PNG image to act as your filter — like sunglasses, a mustache, or a crown.
Here is My Filter Image ( Link )
Ensure the PNG file has a transparent background (alpha channel).
How It Works
- Face Detection:
We use dlib’s frontal face detector and 68-point landmark predictor to locate the face in the webcam feed.
- Filter Placement:
Once a face is detected, we resize and place a transparent PNG (with alpha channel) on top of the face coordinates.
- Live Display:
The video runs in a loop, showing the updated frame with the filter applied, and stops when you press the ESC key.
Full Python Code Of Snapchat Filter:
# codewithrandom.in
import cv2
import numpy as np
import dlib
from math import hypot
# Load the selectable filter overlays. IMREAD_UNCHANGED preserves the
# alpha channel, which overlay_filter() needs for transparent blending.
# A missing file yields None; the main loop checks for that before use.
filters = {
    "1": cv2.imread("mask/anna.png", cv2.IMREAD_UNCHANGED),
    "2": cv2.imread("mask/dog_nose.png", cv2.IMREAD_UNCHANGED),
    "3": cv2.imread("mask/clown_nose.png", cv2.IMREAD_UNCHANGED),
}
current_filter = "1"  # key of the filter currently applied

# Open the default webcam and grab one frame to learn the frame size.
# Fail loudly if the camera cannot be read instead of crashing later
# with an AttributeError on a None frame.
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if not ret:
    raise RuntimeError("Could not read a frame from the webcam")
rows, cols, _ = frame.shape

# dlib face detector plus the 68-point landmark model (the .dat file
# must be downloaded separately and placed next to this script).
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    "shape_predictor_68_face_landmarks.dat"
)
def overlay_filter(frame, filter_img, top_left_x, top_left_y, w, h):
    """Paste ``filter_img`` onto ``frame`` in place.

    The filter is resized to ``(w, h)`` and placed with its top-left
    corner at ``(top_left_x, top_left_y)``. A 4-channel image is
    alpha-blended; a 3-channel image treats near-black pixels as
    transparent. The caller must ensure the target region lies fully
    inside ``frame`` (the main loop's boundary check guarantees this).

    Args:
        frame: BGR webcam frame (modified in place).
        filter_img: BGR or BGRA filter image.
        top_left_x, top_left_y: destination of the filter's top-left corner.
        w, h: destination width and height in pixels.
    """
    filter_resized = cv2.resize(filter_img, (w, h))
    roi = frame[top_left_y:top_left_y + h, top_left_x:top_left_x + w]
    if filter_resized.shape[2] == 4:
        # Vectorized alpha blend: keeping alpha as shape (h, w, 1) lets
        # it broadcast across all three color channels in one expression
        # instead of a per-channel Python loop.
        alpha = filter_resized[:, :, 3:4].astype(float) / 255.0
        blended = (1.0 - alpha) * roi + alpha * filter_resized[:, :, :3]
        frame[top_left_y:top_left_y + h,
              top_left_x:top_left_x + w] = blended.astype(frame.dtype)
    else:
        # No alpha channel: mask the filter's footprint (non-dark pixels)
        # out of the frame, then add the filter on top.
        mask_gray = cv2.cvtColor(filter_resized, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(mask_gray, 25, 255, cv2.THRESH_BINARY_INV)
        background = cv2.bitwise_and(roi, roi, mask=mask)
        frame[top_left_y:top_left_y + h,
              top_left_x:top_left_x + w] = cv2.add(background, filter_resized)
# Main loop: read frames, detect faces, anchor the filter to the nose
# landmarks, and let the user switch filters with the number keys.
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect on the gray frame — same image the landmark predictor uses.
    faces = detector(gray_frame)
    for face in faces:
        landmarks = predictor(gray_frame, face)

        # Anchor points around the nose (68-landmark model: 30 = nose
        # tip, 31/35 = nostril edges). The -30 lifts the mask slightly
        # above the nose tip.
        center_nose = (landmarks.part(30).x, landmarks.part(30).y - 30)
        left_nose = (landmarks.part(31).x, landmarks.part(31).y)
        right_nose = (landmarks.part(35).x, landmarks.part(35).y)

        # Scale the mask relative to the nose width so it tracks the
        # face's distance from the camera; 6.5 and 1.1 were tuned for
        # the Annabelle PNG's proportions.
        nose_width = int(hypot(left_nose[0] - right_nose[0],
                               left_nose[1] - right_nose[1]) * 6.5)
        nose_height = int(nose_width * 1.1)

        top_left_x = int(center_nose[0] - nose_width / 2)
        top_left_y = int(center_nose[1] - nose_height / 2)

        # Skip faces whose mask would extend outside the frame.
        if (top_left_x < 0 or top_left_y < 0
                or top_left_x + nose_width > cols
                or top_left_y + nose_height > rows):
            continue

        # Apply the currently selected filter (skip if its PNG failed
        # to load).
        if current_filter in filters and filters[current_filter] is not None:
            overlay_filter(frame, filters[current_filter],
                           top_left_x, top_left_y, nose_width, nose_height)

    cv2.putText(frame, "Press 1/2/3 to change filter | ESC to exit", (20, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow("Snapchat Filter - Nose Overlay", frame)

    key = cv2.waitKey(1)
    if key == 27:  # ESC to quit
        break
    # BUG FIX: waitKey returns -1 when no key is pressed, and chr(-1)
    # raises ValueError — the original crashed on every idle frame.
    # Only translate real key codes (masked to one byte).
    elif key != -1 and chr(key & 0xFF) in filters:
        current_filter = chr(key & 0xFF)

cap.release()
cv2.destroyAllWindows()
Bonus Project: Hand-Gesture Virtual Mouse (for Windows Users):
import cv2
import mediapipe as mp
# BUG FIX: numpy was never imported, but np.interp is used in the main
# loop below — the script crashed with a NameError on the first hand.
import numpy as np
import pyautogui
import time

# Webcam capture.
cap = cv2.VideoCapture(0)

# MediaPipe Hands: live-video mode, track a single hand.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
    static_image_mode=False,
    max_num_hands=1,
    min_detection_confidence=0.7,
    min_tracking_confidence=0.7
)
mp_draw = mp.solutions.drawing_utils

# Desktop resolution, used to map webcam coordinates to screen pixels.
screen_width, screen_height = pyautogui.size()

# Pointer smoothing factor (larger = steadier but laggier cursor).
smoothening = 7
plocX, plocY = 0, 0  # previous pointer location
clocX, clocY = 0, 0  # current (smoothed) pointer location

# State for the FPS counter.
prev_time = 0

print("👉 Press 'q' to exit the program.")
# Main loop: track the index fingertip, move the OS cursor to follow it,
# and click when the thumb pinches against the index finger.
while True:
    grabbed, frame = cap.read()
    if not grabbed:
        print("❌ Failed to grab frame from webcam.")
        break

    # Mirror the image so moving your hand right moves the cursor right.
    frame = cv2.flip(frame, 1)
    detection = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    if detection.multi_hand_landmarks:
        for hand in detection.multi_hand_landmarks:
            mp_draw.draw_landmarks(frame, hand, mp_hands.HAND_CONNECTIONS)

            frame_h, frame_w, _ = frame.shape

            # Landmark 8 (index fingertip) drives the cursor.
            tip = hand.landmark[8]
            tip_x, tip_y = int(tip.x * frame_w), int(tip.y * frame_h)

            # Map webcam pixel coordinates onto the full desktop.
            target_x = np.interp(tip_x, (0, frame_w), (0, screen_width))
            target_y = np.interp(tip_y, (0, frame_h), (0, screen_height))

            # Ease toward the target position to damp hand jitter.
            clocX = plocX + (target_x - plocX) / smoothening
            clocY = plocY + (target_y - plocY) / smoothening
            pyautogui.moveTo(clocX, clocY)
            plocX, plocY = clocX, clocY

            # Landmark 4 (thumb tip): pinching it against the index
            # fingertip triggers a left click.
            thumb = hand.landmark[4]
            thumb_x, thumb_y = int(thumb.x * frame_w), int(thumb.y * frame_h)
            if abs(tip_x - thumb_x) < 40 and abs(tip_y - thumb_y) < 40:
                pyautogui.click()
                time.sleep(0.2)  # debounce: one pinch = one click

    # FPS overlay (skipped on the very first frame).
    now = time.time()
    fps = 1 / (now - prev_time) if prev_time != 0 else 0
    prev_time = now
    cv2.putText(frame, f'FPS: {int(fps)}', (10, 40),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    cv2.imshow("Virtual Mouse (Hand Gesture)", frame)

    # 'q' quits.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
This version is more detailed, smoother, and optimized with:
- FPS counter
- Smooth mouse movement
- Proper comments for clarity
- Auto exit with the q key
Code Output Preview ( Same For MAC & Windows )

Video Preview :
Making the Annabelle mask blend naturally with the face was hard. Using proper alpha channel processing solved this issue.
Conclusion
This project is a fun mix of OpenCV + creativity, where you can combine horror elements with real-time computer vision. Whether you’re a beginner or a curious developer, this Annabelle face filter can be a great portfolio project or weekend experiment.
Stay tuned for more horror + Python content.
THANKS FOR READING OUR BLOG
STAY WITH US
Our New Article: