
Stuck with an error when trying to replace missing data

I need your help with a small project. I have to build a prediction model from a Kaggle dataset, and I get an error when I try to replace the missing data in the 'value' column. The values seem to be treated as strings because they have dots between the digits. Editing the column by hand is not an option: it has more than 49,000 rows. How can I fix this?
Here is the code and the error:

x['value'].replace(' ',np.NaN).astype(np.float)

ValueError: could not convert string to float: '154.619.063'

Dataset: Multinational enterprises by industrial sector, the dataset from Kaggle. Thank you very much for your help.

Try this:

x['value'].str.replace('.', '', regex=False).replace(' ', np.nan).astype(float)
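Two details matter here. Depending on your pandas version, Series.str.replace may treat the pattern as a regular expression by default, and a bare '.' then matches every character, so pass regex=False explicitly to remove only the literal dots. Also, np.float has been removed from recent NumPy releases; cast with the plain built-in float instead. If some entries might still fail to parse, a more forgiving sketch (assumption: anything unparseable should simply become NaN) is:

x['value'] = pd.to_numeric(x['value'].str.replace('.', '', regex=False), errors='coerce')

With errors='coerce', pd.to_numeric turns every value it cannot parse, including the blank strings, into NaN in a single step.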
import numpy as np 
import pandas as pd
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import pandas_profiling as pp
import plotly.graph_objs as go
from plotly.offline import iplot
import plotly.express as px
import tensorflow as tf
# Use a raw string so the backslashes in the Windows path are not read as escape sequences
df = pd.read_csv(r"C:\Users\Souf win\Downloads\multinationals.csv", delimiter=';')
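# Alternative sketch (assumption: the dots inside 'value' are thousands separators
# written into the CSV). read_csv can strip them while loading, which would make the
# string cleanup in preprocessing() below unnecessary:
# df = pd.read_csv(r"C:\Users\Souf win\Downloads\multinationals.csv", delimiter=';', thousands='.')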
def preprocessing(df):
    df = df.copy()
    df = df.drop(['partner country', 'ind', 'var', 'declaring country', 'unit code',
                  'part', 'cou', 'year', 'year.1', 'unit', 'power_code code',
                  'power_code', 'reference period code', 'reference period'], axis=1)
    # Clean the target first: remove the literal dots, turn blank strings into NaN,
    # then cast to float (np.float was removed from recent NumPy releases)
    df['value'] = df['value'].str.replace('.', '', regex=False).replace(' ', np.nan).astype(float)
    # Drop rows with a missing target, including the blanks converted just above
    missing_target_rows = df[df['value'].isna()].index
    df = df.drop(missing_target_rows, axis=0).reset_index(drop=True)

    # One-hot encode the categorical columns
    for column in ['economic variable', 'industry']:
        dummies = pd.get_dummies(df[column], prefix=column)
        df = pd.concat([df, dummies], axis=1)
        df = df.drop(column, axis=1)

    # Split df into x and y
    y = df['value']
    x = df.drop('value', axis=1)

    # Train/test split
    x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, shuffle=True, random_state=1)

    # Scale x, preserving the DataFrame index and column names
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_train = pd.DataFrame(scaler.transform(x_train), index=x_train.index, columns=x_train.columns)
    x_test = pd.DataFrame(scaler.transform(x_test), index=x_test.index, columns=x_test.columns)

    return x_train, x_test, y_train, y_test

x_train, x_test, y_train, y_test = preprocessing(df)
x_train
y_train
x_train.shape
# The input width must match the number of feature columns after preprocessing
inputs = tf.keras.Input(shape=(x_train.shape[1],))
x = tf.keras.layers.Dense(128, activation='relu')(inputs)
x = tf.keras.layers.Dense(128, activation='relu')(x)
outputs = tf.keras.layers.Dense(1, activation='linear')(x)

model = tf.keras.Model(inputs=inputs, outputs=outputs)

model.compile(
    optimizer='adam',
    loss='mse'
)

history = model.fit(
    x_train,
    y_train,
    validation_split=0.2,
    batch_size=32,
    epochs=100,
    callbacks=[
        tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=3,
            restore_best_weights=True
        )
    ]
)
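
Once training stops, it is worth checking the network on the held-out split. A minimal sketch, assuming you want the r2_score already imported above as the regression metric:

test_mse = model.evaluate(x_test, y_test, verbose=0)
y_pred = model.predict(x_test).flatten()
print('Test MSE:', test_mse)
print('Test R^2:', r2_score(y_test, y_pred))

Because EarlyStopping was created with restore_best_weights=True, these numbers come from the epoch with the lowest validation loss rather than the last epoch trained.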