Pradeeh Program File

Deep Learning projects


PROGRAM:

import tensorflow as tf
import numpy as np
import pretty_midi
import os

def load_midi_files(data_path):
    """Collect note pitches from every non-drum instrument in the MIDI files."""
    notes = []
    for file in os.listdir(data_path):
        if file.endswith(".mid"):
            midi_data = pretty_midi.PrettyMIDI(os.path.join(data_path, file))
            for instrument in midi_data.instruments:
                if not instrument.is_drum:
                    notes.extend(note.pitch for note in instrument.notes)
    return notes

def prepare_dataset(notes, sequence_length=50):
    """Build (input sequence, next note) training pairs with a sliding window."""
    sequences, next_notes = [], []
    for i in range(len(notes) - sequence_length):
        sequences.append(notes[i:i + sequence_length])
        next_notes.append(notes[i + sequence_length])
    return np.array(sequences), np.array(next_notes)

def create_model(sequence_length, vocab_size):
    """Embedding -> stacked LSTMs -> softmax over the note vocabulary."""
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=100,
                                  input_length=sequence_length),
        tf.keras.layers.LSTM(256, return_sequences=True),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.LSTM(256),
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dense(vocab_size, activation="softmax")
    ])
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
    return model

def train_model(model, sequences, next_notes, epochs=100, batch_size=64):
    model.fit(sequences, next_notes, epochs=epochs, batch_size=batch_size)
    return model
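
# Optional variant (not part of the original program): the same training step with a
# held-out validation split and early stopping. The function name, split fraction, and
# patience value below are illustrative assumptions.
def train_model_with_validation(model, sequences, next_notes, epochs=100, batch_size=64):
    early_stop = tf.keras.callbacks.EarlyStopping(
        monitor="val_loss", patience=5, restore_best_weights=True
    )
    model.fit(sequences, next_notes, validation_split=0.1,
              epochs=epochs, batch_size=batch_size, callbacks=[early_stop])
    return model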

def generate_music(model, start_sequence, vocab_size, sequence_length=50, length=100):
    """Generate `length` notes by repeatedly predicting the most likely next note."""
    generated_notes = list(start_sequence)
    for _ in range(length):
        prediction_input = np.array(generated_notes[-sequence_length:]).reshape(1, -1)
        predicted_note = model.predict(prediction_input, verbose=0)
        next_note = np.argmax(predicted_note)
        generated_notes.append(next_note)
    return generated_notes
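
# Optional variant (not part of the original program): sample the next note from the
# softmax output using a temperature instead of always taking the argmax; this tends
# to produce less repetitive music. The function name and default temperature are
# illustrative assumptions.
def generate_music_with_temperature(model, start_sequence, sequence_length=50,
                                    length=100, temperature=1.0):
    generated_notes = list(start_sequence)
    for _ in range(length):
        prediction_input = np.array(generated_notes[-sequence_length:]).reshape(1, -1)
        probs = model.predict(prediction_input, verbose=0)[0]
        # Rescale the predicted distribution by the temperature, then renormalise.
        scaled = np.log(probs + 1e-9) / temperature
        scaled = np.exp(scaled) / np.sum(np.exp(scaled))
        next_note = int(np.random.choice(len(scaled), p=scaled))
        generated_notes.append(next_note)
    return generated_notes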

def notes_to_midi(generated_notes, output_file="generated_music.mid"):
    """Write the generated pitches to a MIDI file as fixed-length piano notes."""
    midi = pretty_midi.PrettyMIDI()
    instrument = pretty_midi.Instrument(program=0)
    start_time = 0
    for note in generated_notes:
        midi_note = pretty_midi.Note(
            velocity=100, pitch=note, start=start_time, end=start_time + 0.5
        )
        instrument.notes.append(midi_note)
        start_time += 0.5
    midi.instruments.append(instrument)
    midi.write(output_file)

# Load the Brahms MIDI dataset, train the model, and write a generated MIDI file.
data_path = 'C:/Users/ASUS/Documents/prathesh/brahms'
notes = load_midi_files(data_path)

# Map each distinct pitch to an integer index (and back) for the embedding layer.
unique_notes = list(set(notes))
note_to_int = {note: number for number, note in enumerate(unique_notes)}
int_to_note = {number: note for note, number in note_to_int.items()}
encoded_notes = [note_to_int[note] for note in notes]

sequence_length = 50
sequences, next_notes = prepare_dataset(encoded_notes, sequence_length)

vocab_size = len(unique_notes)
model = create_model(sequence_length, vocab_size)
trained_model = train_model(model, sequences, next_notes)

# Seed generation with the first training sequence, then decode back to MIDI pitches.
start_sequence = list(sequences[0])
generated_notes = generate_music(trained_model, start_sequence, vocab_size,
                                 sequence_length=sequence_length)
generated_midi_notes = [int_to_note[note] for note in generated_notes]
notes_to_midi(generated_midi_notes)

print("Music generation complete. Check the output MIDI file.")
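
A possible extension, not shown in the original listing: the trained model can be saved
with the standard Keras save/load API so that new music can be generated later without
retraining. The file name below is an assumption.

trained_model.save("music_lstm.keras")
reloaded_model = tf.keras.models.load_model("music_lstm.keras")
more_notes = generate_music(reloaded_model, start_sequence, vocab_size,
                            sequence_length=sequence_length)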
