A Training and Testing Model Is Developed Using the Provided Dataset in Jupyter Notebook 2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score

# Step 1: Create a DataFrame from the sample well-log data (GR, MXFL, PEF)
data = {
"GR": [105.0241, 104.8832, 104.8206, 106.4206, 107.6301, 108.9435, 109.6038, 107.2867, 105.1824,
104.6506, 103.415, 103.6088, 102.9568, 101.7497, 100.2921, 100.4466, 101.3986, 103.812, 106.7401,
109.1152, 111.1988, 111.679, 111.6624, 111.5951, 109.7243, 110.051, 110.852, 110.4687, 110.2447,
107.1055, 101.623, 97.99983, 95.9412, 97.23185, 102.5178, 107.7495, 110.5326, 111.0461, 109.4907,
106.1861, 106.9531, 111.1288, 111.4157, 109.5503, 107.2062, 104.7279, 101.7846, 108.3299, 108.4753,
107.94, 106.8087, 104.5527, 103.6576, 103.0437, 102.9949, 104.8888, 104.5774, 103.0179, 102.1553,
99.7468, 97.26203, 96.04442, 96.14159, 96.32548, 97.28294, 99.04396, 101.5541, 105.0821, 106.9795,
109.1918, 109.0797, 106.7576, 102.948, 102.0777, 103.417, 106.9779, 109.3164, 114.2285, 113.186,
111.756, 110.9327, 111.5131, 110.7305, 110.4762],
"MXFL": [2.40768, 2.195605, 1.908103, 1.757668, 1.787998, 1.9264, 2.041258, 2.231793, 2.547698,
2.619671, 2.611392, 2.520982, 2.261424, 1.959945, 1.95817, 2.086364, 2.069686, 2.023673, 1.856997,
1.750725, 1.840206, 1.917097, 1.75794, 1.590595, 1.648303, 1.873707, 2.14548, 2.372367, 2.482318,
2.955525, 3.695009, 4.466863, 4.953736, 4.881621, 4.490706, 3.701351, 3.026894, 2.526035, 2.307822,
2.504045, 2.009562, 1.657256, 1.707543, 2.254479, 3.236273, 4.890536, 5.39631, 3.982771, 4.165687,
5.54563, 6.609555, 7.075383, 8.407475, 8.452162, 7.136439, 6.892845, 6.899497, 6.83426, 7.041435,
7.500181, 8.586868, 9.261867, 10.71992, 12.38344, 10.0349, 8.141942, 8.577694, 8.845512, 8.441437,
8.173477, 8.388666, 8.842992, 10.05801, 11.66855, 10.79198, 9.366837, 8.920071, 2.768534, 2.465336,
2.54276, 2.779685, 2.887887, 2.934226, 2.712695],
"PEF": [3.617409, 3.640286, 3.661446, 3.686015, 3.67083, 3.648049, 3.568604, 3.488262, 3.408547,
3.373072, 3.358348, 3.376372, 3.406852, 3.427814, 3.448457, 3.473017, 3.491247, 3.532272, 3.572902,
3.608074, 3.615924, 3.635189, 3.665405, 3.688682, 3.715249, 3.743406, 3.768141, 3.749162, 3.681363,
3.56331, 3.406662, 3.284232, 3.168859, 3.087751, 3.093779, 3.122155, 3.148562, 3.200313, 3.250091,
3.261484, 3.318861, 3.364056, 3.398093, 3.412377, 3.424822, 3.430919, 3.387086, 3.388278, 3.411442,
3.425616, 3.438851, 3.451566, 3.465326, 3.493689, 3.520285, 3.530327, 3.510839, 3.460394, 3.448885,
3.395862, 3.360478, 3.391446, 3.409838, 3.427821, 3.435013, 3.437298, 3.433494, 3.394591, 3.349494,
3.336358, 3.423594, 3.466463, 3.490405, 3.506466, 3.534566, 3.563658, 3.56308, 4.906909, 4.905381,
4.901607, 4.895643, 4.902998, 4.963295, 4.977894],
# "Porosity": [...],  # target porosity values are used by the steps below but are not shown in this excerpt
}
df = pd.DataFrame(data)
# Step 2: Split the data into features (X) and target (y)
X = df.drop("Porosity", axis=1)
y = df["Porosity"]
rf_model.fit(X_train, y_train)
# Step 5: Test the model
y_pred = rf_model.predict(X_test)
r2 = r2_score(y_test, y_pred)
# Step 6: Plot predicted vs. actual porosity
plt.figure(figsize=(8, 6))
plt.scatter(y_test, y_pred, label='Predicted vs. actual')  # scatter call assumed; not shown in the excerpt
plt.xlabel('Actual Porosity')
plt.ylabel('Predicted Porosity')
plt.legend()
plt.show()
# "results" is not defined in the excerpt; a comparison table of actual and
# predicted porosity is assumed here
results = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
print("R² score:", r2)
print(results)
Output