0% found this document useful (0 votes)
5 views

Main project

Uploaded by

chethanrohit0045
Copyright
© All Rights Reserved
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
5 views

Main project

Uploaded by

chethanrohit0045
Copyright
© All Rights Reserved
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
You are on page 1/ 3

import os

import cv2
import numpy as np
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.compositing.concatenate import concatenate_videoclips
from gtts import gTTS
from transformers import pipeline
import warnings
import tensorflow as tf

# Suppress TensorFlow and resource warnings


os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'  # disable oneDNN optimization notices printed at TF import
os.environ['HF_HUB_DISABLE_SYMLINKS_WARNING'] = '1'  # silence Hugging Face Hub symlink warning (Windows)
tf.get_logger().setLevel('ERROR')  # only surface TensorFlow errors, not info/warning chatter
warnings.filterwarnings("ignore", category=ResourceWarning)  # hide unclosed-resource warnings from libs

# Step 1: Video Scene Segmentation


def segment_video(video_path, scene_threshold=30, min_gap=2):
    """Detect scene-change timestamps (in seconds) in a video.

    Consecutive frames are compared in grayscale; when the mean absolute
    pixel difference exceeds ``scene_threshold`` the frame is recorded as
    a scene start. Detections closer together than ``min_gap`` seconds
    are collapsed to the first one.

    Args:
        video_path: Path to the input video file.
        scene_threshold: Mean grayscale frame difference above which a
            frame counts as a scene change (default 30, the original
            hard-coded value).
        min_gap: Minimum spacing in seconds between reported scenes
            (default 2, the original hard-coded value).

    Returns:
        List of scene-start timestamps in seconds, in ascending order.

    Raises:
        ValueError: If the video cannot be opened or reports zero FPS
            (which would otherwise cause a ZeroDivisionError below).
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")
    scenes = []
    try:
        frame_rate = cap.get(cv2.CAP_PROP_FPS)
        if not frame_rate:
            raise ValueError(f"Video reports zero FPS: {video_path}")

        prev_gray = None
        frame_number = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Convert each frame to grayscale once and reuse it as the
            # "previous" frame next iteration (the original re-converted
            # the previous frame every loop).
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if prev_gray is not None:
                mean_diff = np.mean(cv2.absdiff(gray, prev_gray))
                if mean_diff > scene_threshold:
                    scenes.append(frame_number / frame_rate)
            prev_gray = gray
            frame_number += 1
    finally:
        # Release the capture even if reading raises mid-loop.
        cap.release()

    # Remove near-duplicate scenes: keep only starts at least min_gap
    # seconds after the previously kept one.
    unique_scenes = []
    for scene in scenes:
        if not unique_scenes or scene - unique_scenes[-1] > min_gap:
            unique_scenes.append(scene)
    return unique_scenes

# Step 2: Summarization
# Abstractive summarizer, loaded once at module import time.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")


def summarize_text(text):
    """Return a short (25-50 token) abstractive summary of *text*."""
    results = summarizer(text, max_length=50, min_length=25, do_sample=False)
    return results[0]['summary_text']

# Step 3: Text-to-Speech
def text_to_speech(text, lang, output_file):
    """Synthesize *text* into an MP3 file using Google TTS.

    Failures are reported to stdout instead of raised, so a missing
    voice/language does not abort the whole pipeline.
    """
    try:
        speech = gTTS(text=text, lang=lang)
        speech.save(output_file)
    except Exception as e:
        print(f"Error generating TTS for language {lang}: {e}")

# Step 4: Create Summary Video


def create_summary_video(video_path, selected_scenes, output_file, clip_length=5):
    """Assemble a summary video from short clips at the given scene starts.

    Each scene start yields a clip of up to ``clip_length`` seconds
    (clamped to the video's duration); starts that would overlap the
    previous clip are skipped. The clips are concatenated and written
    to ``output_file``.

    Args:
        video_path: Path to the source video.
        selected_scenes: Iterable of scene-start timestamps in seconds,
            assumed ascending (as produced by segment_video).
        output_file: Path for the resulting summary video.
        clip_length: Seconds taken from each scene (default 5, the
            original hard-coded value).

    Raises:
        ValueError: If no non-overlapping scenes remain, which would
            otherwise make concatenate_videoclips fail obscurely.
    """
    video = VideoFileClip(video_path)
    final_clip = None
    try:
        # Drop starts that fall inside the previous clip and clamp each
        # clip's end to the video duration.
        unique_scenes = []
        for start in selected_scenes:
            end = start + clip_length
            if not unique_scenes or start >= unique_scenes[-1][1]:
                unique_scenes.append((start, min(end, video.duration)))

        if not unique_scenes:
            raise ValueError("No scenes to include in the summary video")

        # Create subclips for unique scenes.
        clips = [video.subclip(start, end) for start, end in unique_scenes]

        # Concatenate clips and export the summary video.
        final_clip = concatenate_videoclips(clips, method="compose")
        final_clip.write_videofile(output_file, codec="libx264", verbose=False)
    finally:
        # Close the composed clip (leaked in the original) and the source.
        if final_clip is not None:
            final_clip.close()
        video.close()

# Main Function
def main():
    """Run the full pipeline: segment the video, generate a voiceover,
    and write a concatenated summary video."""
    video_path = "videoplayback (3).mp4"  # Replace with your video file path
    output_summary = "summary_video.mp4"

    print("Segmenting video...")
    scenes = segment_video(video_path)
    print(f"Detected unique scenes: {scenes}")

    print("Summarizing scenes...")
    # Replace with actual summarization logic (e.g. summarize_text over
    # per-scene transcripts); a fixed placeholder is used for the demo.
    # NOTE: in the original, the word "logic" from this comment had
    # spilled onto its own line, which was a syntax error.
    summarized_text = "This is a demo summary of scenes."

    print("Generating English voiceover...")
    text_to_speech(summarized_text, "en", "voiceover_en.mp3")

    print("Creating summary video...")
    create_summary_video(video_path, scenes, output_summary)
    print("Summary video created successfully!")


if __name__ == "__main__":
    main()

You might also like