
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import streamlit as st
import altair as alt
# Streamlit UI for sliders
st.title("Interactive Linear Regression")
# Slider for parameter 'a'
a = st.slider("Select a (Slope)", min_value=-10.0, max_value=10.0, step=0.1)
# Slider for parameter 'C'
c = st.slider("Select C (Noise)", min_value=0.0, max_value=100.0, step=1.0)
# Slider for number of points 'n'
n = st.slider("Select number of points (n)", min_value=10, max_value=500, step=1)
# Generate synthetic data: y = a*X + 50 + Gaussian noise with scale c
np.random.seed(42)
X = 2 * np.random.rand(n, 1)
y = a * X + 50 + c * np.random.randn(n, 1)
# Create a linear regression model
model = LinearRegression()
model.fit(X, y)
# Predict y values
y_pred = model.predict(X)
# Calculate the mean squared error
mse = mean_squared_error(y, y_pred)
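# Note: this is the in-sample (training) MSE, i.e. the mean of the squared residuals (y - y_pred)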
# Display the coefficients and the mean squared error
st.write(f"Mean Squared Error: {mse}")
st.write(f"Regression Coefficient (Slope): {model.coef_[0][0]}")
st.write(f"Intercept: {model.intercept_[0]}")
# Combine data into a DataFrame for visualization
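# flatten() turns the (n, 1) column vectors into 1-D arrays, as expected for DataFrame columns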
df = pd.DataFrame({
    "X": X.flatten(),
    "Actual y": y.flatten(),
    "Predicted y": y_pred.flatten()
})
# Create scatter plot for actual data points
scatter = alt.Chart(df).mark_circle(size=60).encode(
    x=alt.X('X', title='X'),
    y=alt.Y('Actual y', title='y'),
    tooltip=['X', 'Actual y']
).properties(
    width=600,
    height=400
)
# Create a line plot for the regression line (Predicted y)
line = alt.Chart(df).mark_line(color='red').encode(
    x='X',
    y='Predicted y'
)
# Layer the scatter plot and regression line (Altair's + operator builds a LayerChart)
chart = scatter + line
# Display the combined chart in Streamlit
st.altair_chart(chart, use_container_width=True)
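To try the app locally, one would typically save the script under a filename such as app.py (the name is an assumption, nothing in the listing fixes it) and launch it with the Streamlit CLI:

streamlit run app.py

Moving any of the three sliders reruns the script top to bottom, which regenerates the data, refits the model, and updates the MSE, coefficients, and chart.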
Write Python to solve a linear regression problem
ChatGPT
Below is a simple example of solving a linear regression problem in Python with the scikit-learn library:
# Import the required packages
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
# Generate some example data (x is the input, y is the output)
# The true relationship is y = 3x + 4, plus some random noise
np.random.seed(42)
x = 2 * np.random.rand(100, 1)
y = 4 + 3 * x + np.random.randn(100, 1)
# Split the data into a training set and a test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
# Create a linear regression model
lin_reg = LinearRegression()
# Train the model
lin_reg.fit(x_train, y_train)
# Make predictions on the test set
y_pred = lin_reg.predict(x_test)
# Compute the mean squared error (MSE) on the test set
mse = mean_squared_error(y_test, y_pred)
print(f"Mean Squared Error: {mse}")
# Visualize the results
plt.scatter(x_test, y_test, color='blue', label='Actual')
plt.plot(x_test, y_pred, color='red', label='Predicted')
plt.title('Linear Regression Results')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
This code shows how to generate data, train a linear regression model, and measure its prediction error. Here we use scikit-learn's LinearRegression to fit the model and then plot the results.
A model like this is well suited for learning about and demonstrating linear relationships.
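As a quick sanity check (a minimal sketch, not part of the original answer, and reusing the variables defined above), the fitted parameters can be read off the trained model and compared with the true values used to generate the data (slope 3, intercept 4); with this noise level they should land close:

# Inspect the fitted parameters and compare them to the data-generating values
print(f"Fitted slope: {lin_reg.coef_[0][0]:.3f} (true value: 3)")
print(f"Fitted intercept: {lin_reg.intercept_[0]:.3f} (true value: 4)")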
Use the CRISP-DM standard
ChatGPT