Python Machine Learning: Data Preprocessing

Published: 2023-06-05 16:30

Dictionary feature extraction

from sklearn.feature_extraction import DictVectorizer

def dictvec():
    """
    Dictionary feature extraction.
    :return: None
    """
    # Instantiate the vectorizer (sparse=False returns a dense array instead of a sparse matrix)
    dv = DictVectorizer(sparse=False)
    data = dv.fit_transform([{'city': '北京', 'temperature': 100}, {'city': '上海', 'temperature': 60}, {'city': '深圳', 'temperature': 30}])
    print(dv.get_feature_names_out())  # on scikit-learn < 1.0 use get_feature_names()
    print(dv.inverse_transform(data))
    print(data)
    return None

if __name__ == "__main__":
    dictvec()

Run screenshot (image omitted)

Text feature extraction (English and Chinese)

from sklearn.feature_extraction.text import CountVectorizer
import jieba

def countvec():
    """
    Text feature extraction (word counts).
    :return: None
    """
    cv = CountVectorizer()
    data = cv.fit_transform(["life is short, i like python", "life is too long, i dislike python"])
    print(cv.get_feature_names_out())  # on scikit-learn < 1.0 use get_feature_names()
    print(data.toarray())  # convert the sparse matrix to an array before printing
    return None


def cutword():
    con1 = jieba.cut("今天很残酷,明天更残酷,后天很美好,但绝对大部分是死在明天晚上,所以每个人不要放弃今天")
    con2 = jieba.cut("我们看到的从很远星系来的光是在几百万年之前发出的,这样当我们看到宇宙时,我们是在看它的过去。")
    con3 = jieba.cut("如果只用一种方式了解某种事物,你就不会真正了解它。了解事物真正含义的秘诀取决于如何将其与我们所了解的事物相联系。")

    # Convert the generators returned by jieba.cut into lists
    content1 = list(con1)
    content2 = list(con2)
    content3 = list(con3)

    # Join each token list into a space-separated string, because
    # CountVectorizer/TfidfVectorizer split their input on whitespace
    c1 = ' '.join(content1)
    c2 = ' '.join(content2)
    c3 = ' '.join(content3)
    return c1, c2, c3

def hanzivec():
    """
    Chinese text feature extraction.
    :return: None
    """
    c1, c2, c3 = cutword()
    print(c1, c2, c3)
    cv = CountVectorizer()
    data = cv.fit_transform([c1, c2, c3])
    print(cv.get_feature_names_out())  # on scikit-learn < 1.0 use get_feature_names()
    print(data.toarray())  # convert the sparse matrix to an array before printing
    return None

if __name__ == "__main__":
    countvec()
    hanzivec()


Run screenshot (image omitted)

Word importance with TF-IDF

from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import jieba

def cutword():
    con1 = jieba.cut("今天很残酷,明天更残酷,后天很美好,但绝对大部分是死在明天晚上,所以每个人不要放弃今天")
    con2 = jieba.cut("我们看到的从很远星系来的光是在几百万年之前发出的,这样当我们看到宇宙时,我们是在看它的过去。")
    con3 = jieba.cut("如果只用一种方式了解某种事物,你就不会真正了解它。了解事物真正含义的秘诀取决于如何将其与我们所了解的事物相联系。")

    # Convert the generators returned by jieba.cut into lists
    content1 = list(con1)
    content2 = list(con2)
    content3 = list(con3)

    # Join each token list into a space-separated string, because
    # TfidfVectorizer splits its input on whitespace
    c1 = ' '.join(content1)
    c2 = ' '.join(content2)
    c3 = ' '.join(content3)
    return c1, c2, c3

def tfidvec():
    """
    Chinese text feature extraction with TF-IDF weighting.
    :return: None
    """
    c1, c2, c3 = cutword()
    print(c1, c2, c3)
    # TfidfVectorizer is essentially CountVectorizer followed by tf-idf weighting;
    # by default each output row is L2-normalized
    tf = TfidfVectorizer()
    data = tf.fit_transform([c1, c2, c3])
    print(tf.get_feature_names_out())  # on scikit-learn < 1.0 use get_feature_names()
    print(data.toarray())  # convert the sparse matrix to an array before printing
    return None

if __name__ == \"__main__\":
    tfidvec()

Implementing the TF-IDF algorithm from scratch
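As a reference for the code below (this summary is added here and is not from the original post): for a word w, the implementation computes TF(w) = count(w) / total number of words in the corpus and IDF(w) = log(N / (n_w + 1)), where N is the number of documents and n_w is the number of documents containing w; the final weight is TF(w) * IDF(w). Note that this TF is computed over the whole corpus rather than per document, which is a simplification of the usual per-document definition.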

from collections import defaultdict
import math
import operator

\"\"\"
函数说明:创建数据样本
Returns:
    dataset - 实验样本切分的词条
    classVec - 类别标签向量
\"\"\"


def loadDataSet():
    dataset = [[\'my\', \'dog\', \'has\', \'flea\', \'problems\', \'help\', \'please\'],  # 切分的词条
               [\'maybe\', \'not\', \'take\', \'him\', \'to\', \'dog\', \'park\', \'stupid\'],
               [\'my\', \'dalmation\', \'is\', \'so\', \'cute\', \'I\', \'love\', \'him\'],
               [\'stop\', \'posting\', \'stupid\', \'worthless\', \'garbage\'],
               [\'mr\', \'licks\', \'ate\', \'my\', \'steak\', \'how\', \'to\', \'stop\', \'him\'],
               [\'quit\', \'buying\', \'worthless\', \'dog\', \'food\', \'stupid\']]
    classVec = [0, 1, 0, 1, 0, 1]  # 类别标签向量,1代表好,0代表不好
    return dataset, classVec


\"\"\"
函数说明:特征选择TF-IDF算法
Parameters:
     list_words:词列表
Returns:
     dict_feature_select:特征选择词字典
\"\"\"


def feature_select(list_words):
    # Count the total frequency of every word across the corpus
    doc_frequency = defaultdict(int)
    for word_list in list_words:
        for i in word_list:
            doc_frequency[i] += 1

    # Compute the TF value of each word (over the whole corpus)
    total_count = sum(doc_frequency.values())
    word_tf = {}  # TF value of each word
    for i in doc_frequency:
        word_tf[i] = doc_frequency[i] / total_count

    # Compute the IDF value of each word
    doc_num = len(list_words)
    word_idf = {}  # IDF value of each word
    word_doc = defaultdict(int)  # number of documents containing each word
    for i in doc_frequency:
        for j in list_words:
            if i in j:
                word_doc[i] += 1
    for i in doc_frequency:
        word_idf[i] = math.log(doc_num / (word_doc[i] + 1))

    # Compute TF * IDF for each word
    word_tf_idf = {}
    for i in doc_frequency:
        word_tf_idf[i] = word_tf[i] * word_idf[i]

    # Sort the words by weight in descending order
    dict_feature_select = sorted(word_tf_idf.items(), key=operator.itemgetter(1), reverse=True)
    return dict_feature_select


if __name__ == '__main__':
    data_list, label_list = loadDataSet()  # load the data
    features = feature_select(data_list)  # TF-IDF value of every word
    print(features)
    print(len(features))


Normalization and standardization
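For reference (this wording is added here, not from the original post): MinMaxScaler rescales each column to a fixed range, by default X' = (x - min) / (max - min), so the values fall in [0, 1]; StandardScaler transforms each column to z = (x - mean) / std, giving zero mean and unit variance.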


from sklearn.preprocessing import MinMaxScaler, StandardScaler

def mm():
    """
    Min-max normalization.
    :return: None
    """
    mm = MinMaxScaler()
    data = mm.fit_transform([[90, 2, 10, 40], [60, 4, 15, 45], [75, 3, 13, 46]])

    print(data)
    return None

def stand():
    """
    Standardization (z-score scaling).
    :return: None
    """
    std = StandardScaler()
    data = std.fit_transform([[1., -1., 3.], [2., 4., 5.], [4., 6., -1.]])
    print(data)
    return None

if __name__ == "__main__":
    print("Normalization:")
    mm()
    print("Standardization:")
    stand()


Handling missing values

import numpy as np
from sklearn.impute import SimpleImputer

def im():
    """
    Missing-value imputation:
    First create the imputer object imp_mean. fit() is given the data x that
    supplies the statistics (mean, median, etc.), and data is the data that
    actually needs to be imputed. transform() then fills each missing value
    in data with the column mean learned from x.
    In practice, fit_transform() is often used instead, i.e. the data used to
    learn the statistics and the data being imputed are the same.
    :return: None
    """
    imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
    x = [[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]
    data = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]

    fit = imp_mean.fit(x)
    new_data = imp_mean.transform(data)
    print("Fit")
    print(fit)
    print("data")
    print(data)
    print("imputed data")
    print(new_data)

    return None

if __name__ == "__main__":
    im()
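A minimal sketch (not part of the original post) of the fit_transform() shortcut mentioned in the docstring, where the same array both supplies the statistics and receives the imputed values:

import numpy as np
from sklearn.impute import SimpleImputer

data = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
# Learn the column means from data itself and fill its NaNs in a single step
print(SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(data))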


Dimensionality reduction: feature selection

from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA

def var():
    """
    Feature selection: remove low-variance features.
    :return: None
    """
    # threshold defaults to 0.0, which removes features whose values are identical
    # in every sample; any non-negative variance threshold may be used
    var = VarianceThreshold(threshold=0.0)
    data = var.fit_transform([[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]])
    print(data)
    return None

def pca():
    """
    Dimensionality reduction with principal component analysis (PCA).
    :return: None
    """
    # n_components=0.9 keeps enough components to explain at least 90% of the variance
    pca = PCA(n_components=0.9)
    data = pca.fit_transform([[2, 8, 4, 5], [6, 3, 0, 8], [5, 4, 9, 1]])
    print(data)
    return None

if __name__ == \"__main__\":
    var()
    print(\"------------------------------\")
    pca()
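A minimal sketch (not part of the original post) showing how to check how much variance the retained components explain; explained_variance_ratio_ is a standard attribute of a fitted PCA object:

from sklearn.decomposition import PCA

pca = PCA(n_components=0.9)
pca.fit([[2, 8, 4, 5], [6, 3, 0, 8], [5, 4, 9, 1]])
# One ratio per retained component; with n_components=0.9 their sum is at least 0.9
print(pca.explained_variance_ratio_)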

Loading data sets

from sklearn.datasets import load_iris, fetch_20newsgroups, load_boston
from sklearn.model_selection import train_test_split
# Load the iris data set
li = load_iris()
print("Feature values")
print(li.data)
print("Target values")
print(li.target)
print("Data set description (feature names and classes)")
print(li.DESCR)

# Split into training and test sets
# Note the return order: training set x_train, y_train; test set x_test, y_test
x_train, x_test, y_train, y_test = train_test_split(li.data, li.target, test_size=0.25)

print("Training set features and targets:", x_train, y_train)
print("Test set features and targets:", x_test, y_test)

# Load the 20 newsgroups data
news = fetch_20newsgroups(subset='all')

print(news.data)
print(news.target)

print("Loading the Boston housing data")
lb = load_boston()  # note: load_boston was removed in scikit-learn 1.2
print("Feature values")
print(lb.data)
print("Target values")
print(lb.target)
print(lb.DESCR)
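Because load_boston has been removed from recent scikit-learn releases, here is a minimal sketch (not part of the original post) using another built-in regression data set, fetch_california_housing, as an alternative:

from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()  # downloaded on first use
print(housing.data.shape)   # feature matrix
print(housing.target[:5])   # median house values of the first five districts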


K-nearest neighbors: a worked example

Kaggle - Facebook V: Predicting Check Ins
Data download:
Link: https://pan.baidu.com/s/1ZT39BIG8LjJ3F6GYfcbfPw
Extraction code: hoxm

from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd

def knncls():
    """
    Predict a user's check-in location with K-nearest neighbors.
    :return: None
    """
    # Read the data into a DataFrame
    data = pd.read_csv("train.csv")

    # print(data.head(10))

    # Preprocess the data
    # 1. Shrink the data set: keep only a small square of the map
    data = data.query("x > 1.0 & x < 1.25 & y > 2.5 & y < 2.75").copy()  # copy() avoids SettingWithCopyWarning when adding columns below

    # Convert the integer timestamps to datetimes (year-month-day hour:minute:second)
    time_value = pd.to_datetime(data['time'], unit='s')  # the timestamps are in seconds
    print("time_value:\n", time_value)

    # Convert to a DatetimeIndex so that day, hour, weekday, etc. can be extracted separately
    time_value = pd.DatetimeIndex(time_value)

    # Construct new features from the datetime components
    data['day'] = time_value.day
    data['hour'] = time_value.hour
    data['weekday'] = time_value.weekday

    # Drop the raw timestamp column (an integer representation of the date,
    # similar to how spreadsheets store dates internally)
    data = data.drop(['time'], axis=1)  # axis=1 drops a column

    print("Data after dropping the timestamp:\n", data)

    # Drop target locations with too few check-ins
    place_count = data.groupby('place_id').count()
    # reset_index() turns the original index into a regular column,
    # and the new DataFrame gets a default 0, 1, 2, ... index
    tf = place_count[place_count.row_id > 3].reset_index()
    print("tf:\n", tf)
    data = data[data['place_id'].isin(tf.place_id)]
    data = data.drop(['row_id'], axis=1)  # row_id carries no predictive information, so drop it
    print("Data after filtering:\n", data)

    # Separate the features and the target
    y = data['place_id']  # target
    x = data.drop(['place_id'], axis=1)  # features

    # Split into training and test sets
    # Arguments: features, target, test-set size
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)

    # Feature engineering (standardization) - this strongly affects the result and should not be skipped
    std = StandardScaler()

    # Standardize the features of the training and test sets
    x_train = std.fit_transform(x_train)

    # fit_transform above already fitted the scaler, so the mean and standard deviation
    # do not need to be recomputed; transform alone is enough here
    x_test = std.transform(x_test)

    # Run the algorithm
    knn = KNeighborsClassifier(n_neighbors=5)

    # fit (train on the data), predict (make predictions), score (compute the accuracy)
    knn.fit(x_train, y_train)  # train on the training set

    # Predict on the test set
    y_predict = knn.predict(x_test)  # only the features are needed for prediction

    print("Predicted check-in locations:", y_predict)

    # Accuracy on the test set
    print("Prediction accuracy:", knn.score(x_test, y_test))

    return None

if __name__ == "__main__":
    knncls()


K-nearest neighbors exercise: predicting iris species

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
"""
Each run can give a different result, somewhat like a random function:
the accuracy fluctuates within a certain range.
During testing it varied between about 89% and 97.7%.
"""
# Load the iris data set
li = load_iris()
# print("Feature values:")
# print(li.data)
# print("Target values:")
# print(li.target)
# print("Data set description:")
# print(li.DESCR)

# Split into training and test sets
# Note the return order: training set x_train, y_train; test set x_test, y_test
x_train, x_test, y_train, y_test = train_test_split(li.data, li.target, test_size=0.25)

print("Training set features:\n", x_train, "\nTargets:\n", y_train)
print("Test set features:\n", x_test, "\nTargets:\n", y_test)

# Feature engineering (standardization)
std = StandardScaler()
x_train = std.fit_transform(x_train)
x_test = std.transform(x_test)

# Run the algorithm
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(x_train, y_train)

y_predict = knn.predict(x_test)

print("Predicted iris classes:\n", y_predict)

print("Prediction accuracy:", knn.score(x_test, y_test))
