Python Reference

The document contains five separate programs demonstrating different functionalities: a Tic-Tac-Toe game, an inversion-parity check for 8-puzzle state reachability, a propositional satisfiability check using SymPy, the Find-S and Candidate Elimination algorithms for concept learning, and an evaluation of decision tree classifiers across tree depths and dataset complexities. Each program includes input handling, algorithm implementation, and output display.


#prgm1

board = ["-", "-", "-",
         "-", "-", "-",
         "-", "-", "-"]

def print_board():
    print(board[0] + "|" + board[1] + "|" + board[2])
    print(board[3] + "|" + board[4] + "|" + board[5])
    print(board[6] + "|" + board[7] + "|" + board[8])

def take_turn(player):
    print(player + "'s turn.")
    position = input("Choose a number from 1-9: ")
    while position not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
        position = input("Invalid number. Choose a number from 1-9: ")
    position = int(position) - 1
    while board[position] != "-":
        position = int(input("Position already taken. Choose a different position: ")) - 1
    board[position] = player
    print_board()

def check_game_over():
    if (board[0] == board[1] == board[2] != "-") or \
       (board[3] == board[4] == board[5] != "-") or \
       (board[6] == board[7] == board[8] != "-") or \
       (board[0] == board[3] == board[6] != "-") or \
       (board[1] == board[4] == board[7] != "-") or \
       (board[2] == board[5] == board[8] != "-") or \
       (board[0] == board[4] == board[8] != "-") or \
       (board[2] == board[4] == board[6] != "-"):
        return "win"
    elif "-" not in board:
        return "tie"
    else:
        return "play"

def play_game():
    print_board()
    current_player = "X"
    game_over = False
    while not game_over:
        take_turn(current_player)
        game_result = check_game_over()
        if game_result == "win":
            print(current_player + " wins!")
            game_over = True
        elif game_result == "tie":
            print("It's a tie!")
            game_over = True
        else:
            current_player = "O" if current_player == "X" else "X"

play_game()

Output
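
A possible refinement (not part of the original program): the eight win conditions in check_game_over can be driven by a table of index triples instead of a chained if. The sketch below assumes the same flat nine-cell board representation with "-" for empty cells; WIN_LINES and check_winner are illustrative names introduced here.

# Minimal sketch: table-driven win check for the same flat 9-cell board.
WIN_LINES = [
    (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
    (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
    (0, 4, 8), (2, 4, 6),              # diagonals
]

def check_winner(board):
    # Return 'win', 'tie', or 'play' for a flat 9-cell board.
    for a, b, c in WIN_LINES:
        if board[a] == board[b] == board[c] != "-":
            return "win"
    return "tie" if "-" not in board else "play"

# Example: X has completed the top row.
print(check_winner(["X", "X", "X", "O", "O", "-", "-", "-", "-"]))  # win
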
#prgm2
def count_inversions(state):
    # Count pairs that appear out of order; the blank tile (0) is ignored.
    inversions = 0
    state = [x for x in state if x != 0]
    for i in range(len(state)):
        for j in range(i + 1, len(state)):
            if state[i] > state[j]:
                inversions += 1
    return inversions

def get_parity(state):
    # Parity (even/odd) of the inversion count decides 8-puzzle reachability.
    return count_inversions(state) % 2

def are_reachable(state1, state2):
    return get_parity(state1) == get_parity(state2)

initial_state = list(map(int, input("Enter initial state as 9 space-separated integers: ").split()))
final_state = list(map(int, input("Enter final state as 9 space-separated integers: ").split()))

if are_reachable(initial_state, final_state):
    print("The states are reachable.")
else:
    print("The states are not reachable.")

Output
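
To make the parity rule concrete (for the 3x3 sliding-tile puzzle, two states are mutually reachable exactly when their inversion counts have the same parity), here is a small self-contained sketch; the two sample states are hypothetical and not taken from the document.

# Demonstration of the inversion-parity test used above.
def inversion_parity(state):
    tiles = [x for x in state if x != 0]          # ignore the blank
    inversions = sum(
        1
        for i in range(len(tiles))
        for j in range(i + 1, len(tiles))
        if tiles[i] > tiles[j]
    )
    return inversions % 2

goal    = [1, 2, 3, 4, 5, 6, 7, 8, 0]             # 0 inversions -> even parity
swapped = [2, 1, 3, 4, 5, 6, 7, 8, 0]             # 1 inversion  -> odd parity

print(inversion_parity(goal), inversion_parity(swapped))    # 0 1
print(inversion_parity(goal) == inversion_parity(swapped))  # False -> not reachable
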
#prgm3
from sympy import symbols
from sympy.logic.boolalg import And, Or, Not, Implies, Equivalent
from sympy.logic.inference import satisfiable

A, B, C = symbols('A B C')
rule1 = Implies(A, B)
rule2 = Implies(A, C)
scenario = And(A, rule1, rule2)
result = satisfiable(scenario)
print("Scenario is satisfiable:", result)

Output
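
For contrast with the satisfiable scenario above, a minimal sketch of an inconsistent knowledge base; satisfiable returns a satisfying assignment (a dict) when one exists and False otherwise.

# Sketch: an inconsistent scenario, for contrast with the program above.
from sympy import symbols
from sympy.logic.boolalg import And, Not, Implies
from sympy.logic.inference import satisfiable

A, B = symbols('A B')

# A holds, A implies B, and yet B is false -- no assignment satisfies all three.
contradiction = And(A, Implies(A, B), Not(B))
print("Contradiction is satisfiable:", satisfiable(contradiction))  # False
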

#prgm4
import numpy as np
import pandas as pd

data = [
    ['Sunny', 'Warm', 'Normal', 'Strong', 'Warm', 'Same', 'Yes'],
    ['Sunny', 'Warm', 'High', 'Strong', 'Warm', 'Same', 'Yes'],
    ['Rainy', 'Cold', 'High', 'Strong', 'Warm', 'Change', 'No'],
    ['Sunny', 'Warm', 'High', 'Strong', 'Cool', 'Change', 'Yes']
]

columns = ['Sky', 'Temperature', 'Humidity', 'Wind', 'Water', 'Forecast', 'Enjoysport']
df = pd.DataFrame(data, columns=columns)

def find_s(examples):
    # Start from the first positive example and generalize attributes that disagree.
    specific_h = examples[0][:-1]
    for instance in examples:
        if instance[-1] == 'Yes':
            for i in range(len(specific_h)):
                if specific_h[i] != instance[i]:
                    specific_h[i] = '?'
    return specific_h

def candidate_elimination(examples):
    num_attributes = len(examples[0]) - 1
    S = ['ϕ'] * num_attributes        # most specific boundary
    G = [['?'] * num_attributes]      # most general boundary

    # Initialize S with the first positive example.
    for example in examples:
        if example[-1] == 'Yes':
            S = example[:-1]
            break

    for example in examples:
        x, label = example[:-1], example[-1]

        if label == 'Yes':
            # Generalize S just enough to cover the positive example.
            for i in range(num_attributes):
                if S[i] != x[i]:
                    S[i] = '?'
            # Remove hypotheses from G inconsistent with S
            G = [g for g in G if all(g[i] == '?' or g[i] == S[i] for i in range(num_attributes))]
        else:
            # Specialize G so it no longer covers the negative example.
            new_G = []
            for g in G:
                for i in range(num_attributes):
                    if g[i] == '?':
                        if S[i] != x[i]:
                            new_hypothesis = g.copy()
                            new_hypothesis[i] = S[i]
                            if new_hypothesis not in new_G:
                                new_G.append(new_hypothesis)
            G = new_G

    return S, G

examples = df.values.tolist()
find_s_hypothesis = find_s([row for row in examples if row[-1] == 'Yes'])
print("Find-S Hypothesis:", find_s_hypothesis)

S_final, G_final = candidate_elimination(examples)

print("\nCandidate Elimination Hypothesis:")
print("S boundary: ", S_final)
print("G boundary: ", G_final)
Output
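
As a follow-up sketch (not part of the original program), a learned hypothesis such as the S boundary can classify unseen instances: '?' matches any attribute value, while a concrete value must match exactly. The matches helper and the new instance below are hypothetical; the hypothesis shown is the one Find-S yields on this EnjoySport data.

# Hypothetical helper: apply a learned hypothesis (e.g. the S boundary) to a new instance.
def matches(hypothesis, instance):
    # True if every attribute is '?' or equals the instance's value.
    return all(h == '?' or h == v for h, v in zip(hypothesis, instance))

learned_s = ['Sunny', 'Warm', '?', 'Strong', '?', '?']
new_instance = ['Sunny', 'Warm', 'High', 'Strong', 'Cool', 'Same']   # hypothetical
print("Predicted Enjoysport:", 'Yes' if matches(learned_s, new_instance) else 'No')
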

#prgm5
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

np.random.seed(42)

def generate_dataset(n_samples=1000, n_features=10, n_informative=5,
                     class_sep=1.0, noise=0.0):
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               n_informative=n_informative, n_redundant=0,
                               n_clusters_per_class=1, class_sep=class_sep,
                               flip_y=noise, random_state=42)
    return train_test_split(X, y, test_size=0.3, random_state=42)

def evaluate_id3_on_dataset(X_train, X_test, y_train, y_test,
                            max_depth_range=range(1, 21)):
    train_acc = []
    test_acc = []
    for depth in max_depth_range:
        clf = DecisionTreeClassifier(criterion='entropy', max_depth=depth,
                                     random_state=42)
        clf.fit(X_train, y_train)
        train_acc.append(accuracy_score(y_train, clf.predict(X_train)))
        test_acc.append(accuracy_score(y_test, clf.predict(X_test)))
    return train_acc, test_acc

def plot_results(train_acc, test_acc, title, max_depth_range):
    plt.figure(figsize=(8, 5))
    plt.plot(max_depth_range, train_acc, label='Training Accuracy', marker='o')
    plt.plot(max_depth_range, test_acc, label='Testing Accuracy', marker='s')
    plt.xlabel('Tree Depth')
    plt.ylabel('Accuracy')
    plt.title(title)
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()

dataset_configs = [
    {"n_features": 10, "n_informative": 2, "class_sep": 2.0, "noise": 0.0,
     "label": "Simple Dataset"},
    {"n_features": 10, "n_informative": 5, "class_sep": 1.0, "noise": 0.1,
     "label": "Medium Complexity"},
    {"n_features": 20, "n_informative": 10, "class_sep": 0.5, "noise": 0.3,
     "label": "High Complexity"}
]

for config in dataset_configs:
    X_train, X_test, y_train, y_test = generate_dataset(
        n_features=config["n_features"],
        n_informative=config["n_informative"],
        class_sep=config["class_sep"],
        noise=config["noise"]
    )
    max_depth_range = range(1, 21)
    train_acc, test_acc = evaluate_id3_on_dataset(X_train, X_test, y_train, y_test,
                                                  max_depth_range)
    plot_results(train_acc, test_acc, f'ID3 Performance: {config["label"]}',
                 max_depth_range)

Output
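
A possible extension, assuming generate_dataset and evaluate_id3_on_dataset from the program above are already defined: pick the tree depth with the best held-out accuracy directly from the returned curve instead of reading it off the plot.

# Sketch: choose the depth with the best test accuracy for one of the configurations.
import numpy as np

X_train, X_test, y_train, y_test = generate_dataset(n_features=10, n_informative=5,
                                                    class_sep=1.0, noise=0.1)
depths = range(1, 21)
train_acc, test_acc = evaluate_id3_on_dataset(X_train, X_test, y_train, y_test, depths)

best_idx = int(np.argmax(test_acc))
print("Best depth:", list(depths)[best_idx])
print("Test accuracy at best depth:", test_acc[best_idx])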
