Slip 1
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# Load the Position_Salaries dataset and separate the features (X) from the target salary (y)
dataset = pd.read_csv('Downloads/archive/Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values

# Split into 70% training / 30% testing data and fit a simple linear regression model
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
regressor = LinearRegression()
regressor.fit(X_train, y_train)

print("Training set:")
print("X_train:", X_train)
print("y_train:", y_train)
print("Testing set:")
print("X_test:", X_test)
print("y_test:", y_test)
Slip 2
Change Page Preferences
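Only the title of this question survives here. The script below is a minimal sketch of one common reading of it: storing page preferences such as font style, font size and background colour in cookies and applying them when the page is reloaded. The file name, form fields and cookie names are all assumptions.

<?php
// prefs.php (hypothetical file name): store and apply page preferences via cookies
if (isset($_POST['fontstyle'])) {
    // Remember the submitted preferences for one hour
    setcookie('fontstyle', $_POST['fontstyle'], time() + 3600);
    setcookie('fontsize', $_POST['fontsize'], time() + 3600);
    setcookie('bgcolor', $_POST['bgcolor'], time() + 3600);
    header('Location: prefs.php');   // reload so the new cookies take effect
    exit;
}
$style = $_COOKIE['fontstyle'] ?? 'Arial';
$size  = $_COOKIE['fontsize'] ?? '16px';
$bg    = $_COOKIE['bgcolor'] ?? '#ffffff';
?>
<body style="font-family:<?php echo $style; ?>; font-size:<?php echo $size; ?>; background-color:<?php echo $bg; ?>;">
<form method="post">
    Font style: <input type="text" name="fontstyle">
    Font size: <input type="text" name="fontsize">
    Background colour: <input type="text" name="bgcolor">
    <input type="submit" value="Apply">
</form>
</body>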
4)
<?php
// Surviving fragment of a login-attempt check; the form and the attempt
// counter were lost from this copy. $attempts is an assumed session- or
// cookie-backed counter and $p the submitted password.
if ($attempts >= 3) {
    die("attempt is over");
}
if ($p == 123) {
    echo "welcome";
} else {
    echo "invalid info";
}
?>
2) Create a ‘User’ dataset having 5 columns, namely: User ID, Gender, Age, Estimated Salary and Purchased. Build a logistic regression model that can predict, from the given parameters, whether a person will buy a car or not.
import pandas as pd
import numpy as np

# Sample 'User' data; the Gender column from the problem statement is omitted here
# (one way to add and encode it is sketched after this block)
user_data = {'User ID': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
             'Age': [19, 35, 26, 27, 19, 32, 25, 29, 34, 25],
             'Estimated Salary': [19000, 20000, 43000, 57000, 76000, 58000, 84000, 15000, 43000, 22000],
             'Purchased': [0, 0, 0, 0, 0, 1, 1, 0, 1, 0]}
user_df = pd.DataFrame(user_data)

# Features: Age and Estimated Salary; target: Purchased
X = user_df.iloc[:, 1:-1].values
y = user_df.iloc[:, -1].values

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

from sklearn.linear_model import LogisticRegression
logistic_model = LogisticRegression()
logistic_model.fit(X_train, y_train)
y_pred = logistic_model.predict(X_test)

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Precision:", precision_score(y_test, y_pred))
print("Recall:", recall_score(y_test, y_pred))
print("F1-score:", f1_score(y_test, y_pred))
Slip 4
1) Write a PHP script to accept Employee details (Eno, Ename, Address) on the first page. On the second page, accept earnings (Basic, DA, HRA). On the third page, print the Employee information (Eno, Ename, Address, Basic, DA, HRA, Total). [Use Session]
page1.html
page2.php
page3.php (only this closing print fragment survives; a sketch of all three pages follows below)
<?php
// Fragment: print the collected details on the third page
// (the leading echo and the <br> tags were lost when the HTML was stripped)
echo "name=" . $name . "<br>" .
     "address=" . $address . "<br>" .
     "basic sal=" . $basic . "<br>" .
     "DA=" . $da . "<br>" .
     "HRA=" . $hra;
?>
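Only that print fragment of the original answer survives; below is a minimal sketch of how the three pages named in the question could work together using sessions. Each block would live in its own file, and the lowercase form-field names are assumptions.

<!-- page1.html: accept employee details -->
<form action="page2.php" method="post">
    Eno: <input type="text" name="eno">
    Ename: <input type="text" name="ename">
    Address: <input type="text" name="address">
    <input type="submit" value="Next">
</form>

<?php
// page2.php: store the details in the session, then accept earnings
session_start();
$_SESSION['eno']     = $_POST['eno'];
$_SESSION['ename']   = $_POST['ename'];
$_SESSION['address'] = $_POST['address'];
?>
<form action="page3.php" method="post">
    Basic: <input type="text" name="basic">
    DA: <input type="text" name="da">
    HRA: <input type="text" name="hra">
    <input type="submit" value="Show">
</form>

<?php
// page3.php: print the employee information and the computed total
session_start();
$basic = $_POST['basic'];
$da    = $_POST['da'];
$hra   = $_POST['hra'];
$total = $basic + $da + $hra;
echo "Eno=" . $_SESSION['eno'] . "<br>";
echo "Ename=" . $_SESSION['ename'] . "<br>";
echo "Address=" . $_SESSION['address'] . "<br>";
echo "Basic=" . $basic . "<br>DA=" . $da . "<br>HRA=" . $hra . "<br>";
echo "Total=" . $total;
?>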
2) Build a simple linear regression model for Fish Species Weight Prediction.
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Load the fish measurements; the slicing below assumes column 1 holds the
# single predictor (e.g. a length measurement) and column 2 the weight to predict
fish_data = pd.read_csv('fish_data.csv')
X = fish_data.iloc[:, 1:2].values
y = fish_data.iloc[:, 2].values

# 80/20 train-test split and model fitting
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)

# Evaluate with mean squared error and R^2
from sklearn.metrics import mean_squared_error, r2_score
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print("Mean squared error:", mse)
print("R2 score:", r2)

# Predict the weight for a new measurement of 30
new_data = [[30]]
prediction = regressor.predict(new_data)
print("Predicted weight:", prediction)
Slip 5
1) Create an XML file named “Item.xml” with item-name, item-rate and item-quantity elements. Store the details of 5 items of different types.
<?xml version="1.0" encoding="UTF-8"?>
<!-- Item.xml; the root element name and the rate/quantity ordering of the original values are assumptions made while restoring the stripped tags -->
<itemlist>
    <item><item-name>pen</item-name><item-rate>10</item-rate><item-quantity>2</item-quantity></item>
    <item><item-name>pencil</item-name><item-rate>5</item-rate><item-quantity>6</item-quantity></item>
    <item><item-name>book</item-name><item-rate>100</item-rate><item-quantity>2</item-quantity></item>
    <item><item-name>scale</item-name><item-rate>10</item-rate><item-quantity>87</item-quantity></item>
    <item><item-name>notes</item-name><item-rate>0976</item-rate><item-quantity>2</item-quantity></item>
</itemlist>
2) Use the iris dataset. Write a Python program to view some basic statistical details like percentile, mean, std etc. of the species 'Iris-setosa', 'Iris-versicolor' and 'Iris-virginica'. Apply logistic regression on the dataset to identify the different species (setosa, versicolor, virginica) of Iris flowers given just 4 features: sepal and petal lengths and widths. Find the accuracy of the model.
import pandas as pd
from sklearn.datasets import load_iris

# Load iris and attach readable species names
iris = load_iris()
iris_df = pd.DataFrame(data=iris.data, columns=iris.feature_names)
iris_df['target'] = iris.target
iris_df['target'] = iris_df['target'].apply(lambda x: iris.target_names[x])

# Basic statistics (count, mean, std, percentiles, ...) per species
print(iris_df[iris_df['target'] == 'setosa'].describe())
print(iris_df[iris_df['target'] == 'versicolor'].describe())
print(iris_df[iris_df['target'] == 'virginica'].describe())

# Logistic regression on the four features to classify the species
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
model = LogisticRegression(max_iter=200)  # raised iteration limit to avoid convergence warnings
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)
Slip 6
1) Write a PHP script to read the “book.xml” file into a SimpleXML object and display its attributes and elements. (Use the simplexml_load_file() function.)
book.xml (the element names bookinfo/bookno/bookname/author are assumptions; only the values survived in this copy)
<?xml version="1.0" encoding="UTF-8"?>
<bookstore>
    <bookinfo><bookno>1</bookno><bookname>c</bookname><author>xyz</author></bookinfo>
    <bookinfo><bookno>2</bookno><bookname>php</bookname><author>abc</author></bookinfo>
    <bookinfo><bookno>3</bookno><bookname>java</bookname><author>pqr</author></bookinfo>
</bookstore>
book.php
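The body of book.php is missing from this copy; below is a minimal sketch using simplexml_load_file(), assuming the element names used in the reconstructed book.xml above.

<?php
// Load book.xml into a SimpleXMLElement and walk its records
$books = simplexml_load_file('book.xml');
foreach ($books->bookinfo as $book) {
    // Display any attributes present on the <bookinfo> element
    foreach ($book->attributes() as $name => $value) {
        echo $name . " = " . $value . "<br>";
    }
    // Display the child elements
    echo "Book No: " . $book->bookno . "<br>";
    echo "Book Name: " . $book->bookname . "<br>";
    echo "Author: " . $book->author . "<br><br>";
}
?>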
2) Create the following dataset in Python and convert the categorical values into numeric format. Apply the apriori algorithm on the dataset to generate the frequent itemsets and association rules. Repeat the process with different min_sup values.
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

# Transaction dataset (each inner list is one market basket)
data = [['Milk', 'Egg', 'Bread'],
        ['Milk', 'Bread'],
        ['Milk', 'Egg', 'Bread', 'Cheese'],
        ['Milk', 'Egg'],
        ['Bread', 'Cheese']]

# One-hot encode the transactions and convert the boolean matrix to 0/1 integers
te = TransactionEncoder()
te_ary = te.fit_transform(data)
tid = pd.DataFrame(te_ary, columns=te.columns_)
items = tid.astype(int)

# Repeat the mining with different minimum-support thresholds
min_sup_values = [0.4, 0.6, 0.8]
for min_sup in min_sup_values:
    frequent_itemsets = apriori(items, min_support=min_sup, use_colnames=True)
    rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
    print('Min support:', min_sup)
    print('Frequent itemsets:')
    print(frequent_itemsets)
    print('Association rules:')
    print(rules)
    print()
Slip 7
1) Write a PHP script to read the “Movie.xml” file and print all MovieTitle and ActorName values using the DOMDocument parser. “Movie.xml” should contain at least 5 records with the structure MovieInfo (MovieNo, MovieTitle, ActorName, ReleaseYear).
Movie.xml (only one of the five records survives in this copy; the root element name is assumed)
<?xml version="1.0" encoding="UTF-8"?>
<MovieList>
    <MovieInfo>
        <MovieNo>65464</MovieNo>
        <MovieTitle>dilwale</MovieTitle>
        <ActorName>varun</ActorName>
        <ReleaseYear>2015</ReleaseYear>
    </MovieInfo>
    <!-- further MovieInfo records would follow -->
</MovieList>
movie.php
<?php
// Parse Movie.xml with the DOM parser and print every MovieTitle and ActorName
$doc = new DOMDocument();
$doc->load('Movie.xml');
$movieInfoList = $doc->getElementsByTagName('MovieInfo');
foreach ($movieInfoList as $movieInfo) {
    $movieTitle = $movieInfo->getElementsByTagName('MovieTitle')->item(0)->nodeValue;
    $actorName = $movieInfo->getElementsByTagName('ActorName')->item(0)->nodeValue;
    echo "MovieTitle: " . $movieTitle . "<br>";
    echo "ActorName: " . $actorName . "<br>";
}
?>
2) Download the Market basket dataset. Write a python program to read the dataset and display its information. Preprocess the data (drop null values etc.) Convert the categorical values into numeric format. Apply the apriori algorithm on the above dataset to generate the frequent itemsets and association rules.
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules
import urllib.request

# Download the Online Retail (market basket) dataset and display its information
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online%20Retail.xlsx"
filename = "Online Retail.xlsx"
urllib.request.urlretrieve(url, filename)
df = pd.read_excel(filename)
print("Dataset information:")
df.info()

# Preprocess: drop null values and keep only positive quantities
df = df.dropna()
df = df[df['Quantity'] > 0]

# Convert the categorical StockCode values to numeric format; codes that are not
# purely numeric become NaN and are dropped so they do not enter the baskets
df['StockCode'] = pd.to_numeric(df['StockCode'], errors='coerce')
df = df.dropna(subset=['StockCode'])

# Group items by invoice to form one transaction per invoice
transactions = df.groupby(['InvoiceNo'])['StockCode'].apply(list).values.tolist()

# One-hot encode the transactions and mine frequent itemsets and rules
te = TransactionEncoder()
te_ary = te.fit_transform(transactions)
tid = pd.DataFrame(te_ary, columns=te.columns_)
items = tid.astype('int')
min_sup = 0.03
frequent_itemsets = apriori(items, min_support=min_sup, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
print('Min support:', min_sup)
print('Frequent itemsets:')
print(frequent_itemsets)
print('Association rules:')
print(rules)