Main project
import cv2
import numpy as np
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.compositing.concatenate import concatenate_videoclips
from gtts import gTTS
from transformers import pipeline
import warnings
import tensorflow as tf  # noqa: F401  (optional backend for transformers; not used directly)

warnings.filterwarnings("ignore")  # keep tf/transformers startup warnings out of the console
# Step 1: Scene segmentation via frame differencing
def segment_video(video_path, threshold=30.0):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back if FPS metadata is missing
    scenes, prev_frame, frame_number = [], None, 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if prev_frame is not None and np.mean(cv2.absdiff(frame, prev_frame)) > threshold:
            scenes.append(frame_number / fps)  # scene-change timestamp in seconds
        prev_frame = frame
        frame_number += 1
    cap.release()
    return scenes
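# The threshold is footage-dependent: it is compared against the mean absolute
# pixel difference (0-255 scale), so higher values keep only harder cuts.
# Illustrative call:
#   scenes = segment_video("videoplayback (3).mp4", threshold=30.0)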
# Step 2: Summarization
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

def summarize_text(text):
    return summarizer(text, max_length=50, min_length=25, do_sample=False)[0]['summary_text']
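# bart-large-cnn truncates inputs past roughly 1024 tokens, so long transcripts
# are better summarized chunk by chunk. A minimal sketch; the 400-word chunk
# size is an assumed starting point, not a tuned value.
def summarize_long_text(text, chunk_words=400):
    words = text.split()
    chunks = (" ".join(words[i:i + chunk_words]) for i in range(0, len(words), chunk_words))
    return " ".join(summarize_text(chunk) for chunk in chunks)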
# Step 3: Text-to-Speech
def text_to_speech(text, lang, output_file):
    try:
        tts = gTTS(text=text, lang=lang)
        tts.save(output_file)
    except Exception as e:
        print(f"Error generating TTS for language {lang}: {e}")
# Step 4: Keep non-overlapping 5-second windows around each detected scene
def build_summary_clips(video, selected_scenes):
    # Process scenes to ensure no overlap or invalid durations
    unique_scenes = []
    for start in selected_scenes:
        end = start + 5  # 5-second clips
        if not unique_scenes or start >= unique_scenes[-1][1]:
            unique_scenes.append((start, min(end, video.duration)))
    return unique_scenes
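# A minimal assembly sketch, assuming the moviepy 1.x API imported above
# (`subclip` / `concatenate_videoclips`); the AAC audio codec is illustrative.
def assemble_summary(video, scene_windows, output_file):
    clips = [video.subclip(start, end) for start, end in scene_windows]
    concatenate_videoclips(clips).write_videofile(output_file, audio_codec="aac")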
# Main Function
def main():
    video_path = "videoplayback (3).mp4"  # Replace with your video file path
    output_summary = "summary_video.mp4"

    print("Segmenting video...")
    scenes = segment_video(video_path)
    print(f"Detected unique scenes: {scenes}")

    print("Summarizing scenes...")
    summarized_text = "This is a demo summary of scenes."  # Replace with actual summarization logic
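    # Assumed wiring of the remaining steps: "narration.mp3" is an illustrative
    # file name, English ("en") is an arbitrary default, and the narration file
    # is written alongside the summary video rather than mixed into it.
    print("Generating narration...")
    text_to_speech(summarized_text, "en", "narration.mp3")

    print("Assembling summary video...")
    video = VideoFileClip(video_path)
    assemble_summary(video, build_summary_clips(video, scenes), output_summary)
    video.close()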
if __name__ == "__main__":
main()