-- ENSURE_COLUMN: idempotent helper that guarantees a table/column/type exists.


CREATE OR REPLACE PROCEDURE ENSURE_COLUMN(var_table_name in varchar2, var_column_name in varchar2, var_column_type in varchar2)
AS
 -- Idempotently guarantees that var_table_name has column var_column_name
 -- of var_column_type:
 --   * table missing               -> CREATE TABLE with just that column
 --   * table there, column missing -> ALTER TABLE ... ADD
 --   * both present                -> ALTER TABLE ... MODIFY (re-applies type)
 --
 -- Parameters:
 --   var_table_name  - unqualified table name (matched case-insensitively;
 --                     the data dictionary stores unquoted names upper-cased)
 --   var_column_name - column to create/alter
 --   var_column_type - full column type spec, e.g. 'VARCHAR2(100)'
 --
 -- Raises: ORA-44003 (DBMS_ASSERT) when a name parameter is not a legal
 -- simple SQL identifier; DDL errors from EXECUTE IMMEDIATE propagate.
 IS_TABLE_EXISTS NUMBER;
 IS_COLUMN_EXISTS NUMBER;
 -- Validated, upper-cased copies that are safe to splice into dynamic DDL.
 v_table  VARCHAR2(128);
 v_column VARCHAR2(128);
BEGIN
         -- Reject anything that is not a legal simple identifier BEFORE it
         -- reaches EXECUTE IMMEDIATE: raw concatenation of the name
         -- parameters was an SQL-injection hole. NOTE(review): the type
         -- parameter is still spliced verbatim (it legitimately contains
         -- '(' and digits, so SIMPLE_SQL_NAME cannot check it) -- it must
         -- never come from untrusted input.
         v_table  := UPPER(SYS.DBMS_ASSERT.SIMPLE_SQL_NAME(var_table_name));
         v_column := UPPER(SYS.DBMS_ASSERT.SIMPLE_SQL_NAME(var_column_name));
         -- Dictionary lookup uses the upper-cased copy: the original raw
         -- comparison made lower-case input miss USER_TABLES, re-run
         -- CREATE TABLE, and fail with ORA-00955 on every later call.
         SELECT count(*) into IS_TABLE_EXISTS FROM USER_TABLES WHERE TABLE_NAME = v_table;
         IF IS_TABLE_EXISTS = 0 THEN
            EXECUTE IMMEDIATE 'CREATE TABLE '||v_table||' ( '||v_column||' '||var_column_type||' ) ';
         ELSE
            SELECT count(*) into IS_COLUMN_EXISTS FROM user_tab_columns WHERE TABLE_NAME = v_table AND COLUMN_NAME = v_column;
            IF IS_COLUMN_EXISTS = 0 THEN
                 EXECUTE IMMEDIATE 'ALTER TABLE '||v_table||' ADD ('||v_column||' '||var_column_type||' ) ';
            ELSE
                 EXECUTE IMMEDIATE 'ALTER TABLE '||v_table||' MODIFY ('||v_column||' '||var_column_type||' ) ';
            END IF;
         END IF;
END;


from utils import generate_report from imblearn.ensemble import EasyEnsembleClassifier eec = EasyEnsembleClassifier(random_state=42) eec.fit(X_train, y_train) y_pred = eec.predict(X_test) plot_distribution(train_data) plot_distribution(test_data) report = generate_report(y_pred, y_test, output=True) accuracy = report['accuracy'] print(accuracy) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\arrays\categorical.py:591, in Categorical.astype(self, dtype, copy) 590 try: --> 591 new_cats = new_cats.astype(dtype=dtype, copy=copy) 592 fill_value = self.categories._na_value ValueError: could not convert string to float: '130A' During handling of the above exception, another exception occurred: ValueError Traceback (most recent call last) Cell In[8], line 6 2 from imblearn.ensemble import EasyEnsembleClassifier 5 eec = EasyEnsembleClassifier(random_state=42) ----> 6 eec.fit(X_train, y_train) 8 y_pred = eec.predict(X_test) 10 plot_distribution(train_data) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\sklearn\base.py:1389, in _fit_context.<locals>.decorator.<locals>.wrapper(estimator, *args, **kwargs) 1382 estimator._validate_params() 1384 with config_context( 1385 skip_parameter_validation=( 1386 prefer_skip_nested_validation or global_skip_validation 1387 ) 1388 ): -> 1389 return fit_method(estimator, *args, **kwargs) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\imblearn\ensemble\_easy_ensemble.py:271, in EasyEnsembleClassifier.fit(self, X, y) 269 self._validate_params() 270 # overwrite the base class method by disallowing `sample_weight` --> 271 return super().fit(X, y) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\sklearn\utils\validation.py:63, in _deprecate_positional_args.<locals>._inner_deprecate_positional_args.<locals>.inner_f(*args, **kwargs) 61 extra_args = len(args) - 
len(all_args) 62 if extra_args <= 0: ---> 63 return f(*args, **kwargs) 65 # extra_args > 0 66 args_msg = [ 67 "{}={}".format(name, arg) 68 for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:]) 69 ] File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\sklearn\base.py:1389, in _fit_context.<locals>.decorator.<locals>.wrapper(estimator, *args, **kwargs) 1382 estimator._validate_params() 1384 with config_context( 1385 skip_parameter_validation=( 1386 prefer_skip_nested_validation or global_skip_validation 1387 ) 1388 ): -> 1389 return fit_method(estimator, *args, **kwargs) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\sklearn\ensemble\_bagging.py:375, in BaseBagging.fit(self, X, y, sample_weight, **fit_params) 372 _raise_for_params(fit_params, self, "fit") 374 # Convert data (X is required to be 2d and indexable) --> 375 X, y = validate_data( 376 self, 377 X, 378 y, 379 accept_sparse=["csr", "csc"], 380 dtype=None, 381 ensure_all_finite=False, 382 multi_output=True, 383 ) 385 if sample_weight is not None: 386 sample_weight = _check_sample_weight(sample_weight, X, dtype=None) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\sklearn\utils\validation.py:2961, in validate_data(_estimator, X, y, reset, validate_separately, skip_check_array, **check_params) 2959 y = check_array(y, input_name="y", **check_y_params) 2960 else: -> 2961 X, y = check_X_y(X, y, **check_params) 2962 out = X, y 2964 if not no_val_X and check_params.get("ensure_2d", True): File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\sklearn\utils\validation.py:1370, in check_X_y(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_writeable, force_all_finite, ensure_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator) 1364 raise ValueError( 1365 f"{estimator_name} requires y to be passed, but the target y is None" 1366 ) 1368 ensure_all_finite = 
_deprecate_force_all_finite(force_all_finite, ensure_all_finite) -> 1370 X = check_array( 1371 X, 1372 accept_sparse=accept_sparse, 1373 accept_large_sparse=accept_large_sparse, 1374 dtype=dtype, 1375 order=order, 1376 copy=copy, 1377 force_writeable=force_writeable, 1378 ensure_all_finite=ensure_all_finite, 1379 ensure_2d=ensure_2d, 1380 allow_nd=allow_nd, 1381 ensure_min_samples=ensure_min_samples, 1382 ensure_min_features=ensure_min_features, 1383 estimator=estimator, 1384 input_name="X", 1385 ) 1387 y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric, estimator=estimator) 1389 check_consistent_length(X, y) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\sklearn\utils\validation.py:973, in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_writeable, force_all_finite, ensure_all_finite, ensure_non_negative, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator, input_name) 968 if pandas_requires_conversion: 969 # pandas dataframe requires conversion earlier to handle extension dtypes with 970 # nans 971 # Use the original dtype for conversion if dtype is None 972 new_dtype = dtype_orig if dtype is None else dtype --> 973 array = array.astype(new_dtype) 974 # Since we converted here, we do not need to convert again later 975 dtype = None File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\generic.py:6643, in NDFrame.astype(self, dtype, copy, errors) 6637 results = [ 6638 ser.astype(dtype, copy=copy, errors=errors) for _, ser in self.items() 6639 ] 6641 else: 6642 # else, only a single dtype is given -> 6643 new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) 6644 res = self._constructor_from_mgr(new_data, axes=new_data.axes) 6645 return res.__finalize__(self, method="astype") File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\internals\managers.py:430, in BaseBlockManager.astype(self, dtype, copy, errors) 427 elif using_copy_on_write(): 
428 copy = False --> 430 return self.apply( 431 "astype", 432 dtype=dtype, 433 copy=copy, 434 errors=errors, 435 using_cow=using_copy_on_write(), 436 ) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\internals\managers.py:363, in BaseBlockManager.apply(self, f, align_keys, **kwargs) 361 applied = b.apply(f, **kwargs) 362 else: --> 363 applied = getattr(b, f)(**kwargs) 364 result_blocks = extend_blocks(applied, result_blocks) 366 out = type(self).from_blocks(result_blocks, self.axes) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\internals\blocks.py:758, in Block.astype(self, dtype, copy, errors, using_cow, squeeze) 755 raise ValueError("Can not squeeze with more than one column.") 756 values = values[0, :] # type: ignore[call-overload] --> 758 new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) 760 new_values = maybe_coerce_values(new_values) 762 refs = None File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\dtypes\astype.py:237, in astype_array_safe(values, dtype, copy, errors) 234 dtype = dtype.numpy_dtype 236 try: --> 237 new_values = astype_array(values, dtype, copy=copy) 238 except (ValueError, TypeError): 239 # e.g. _astype_nansafe can fail on object-dtype of strings 240 # trying to convert to float 241 if errors == "ignore": File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\dtypes\astype.py:179, in astype_array(values, dtype, copy) 175 return values 177 if not isinstance(values, np.ndarray): 178 # i.e. 
ExtensionArray --> 179 values = values.astype(dtype, copy=copy) 181 else: 182 values = _astype_nansafe(values, dtype, copy=copy) File c:\Users\matianht\.conda\envs\nomura\lib\site-packages\pandas\core\arrays\categorical.py:602, in Categorical.astype(self, dtype, copy) 597 except ( 598 TypeError, # downstream error msg for CategoricalIndex is misleading 599 ValueError, 600 ): 601 msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}" --> 602 raise ValueError(msg) 604 result = take_nd( 605 new_cats, ensure_platform_int(self._codes), fill_value=fill_value 606 ) 608 return result ValueError: Cannot cast object dtype to float64 这个代码报错是为什么
07-29
资源下载链接为: https://pan.quark.cn/s/22ca96b7bd39 在 IT 领域,文档格式转换是常见需求,尤其在处理多种文件类型时。本文将聚焦于利用 Java 技术栈,尤其是 Apache POI 和 iTextPDF 库,实现 doc、xls(涵盖 Excel 2003 及 Excel 2007+)以及 txt、图片等格式文件向 PDF 的转换,并实现在线浏览功能。 先从 Apache POI 说起,它是一个强大的 Java 库,专注于处理 Microsoft Office 格式文件,比如 doc 和 xls。Apache POI 提供了 HSSF 和 XSSF 两个 API,其中 HSSF 用于读写老版本的 BIFF8 格式(Excel 97-2003),XSSF 则针对新的 XML 格式(Excel 2007+)。这两个 API 均具备读取和写入工作表、单元格、公式、样式等功能。读取 Excel 文件时,可通过创建 HSSFWorkbook 或 XSSFWorkbook 对象来打开相应格式的文件,进而遍历工作簿中的每个 Sheet,获取行和列数据。写入 Excel 文件时,创建新的 Workbook 对象,添加 Sheet、Row 和 Cell,即可构建新 Excel 文件。 再看 iTextPDF,它是一个用于生成和修改 PDF 文档的 Java 库,拥有丰富的 API。创建 PDF 文档时,借助 Document 对象,可定义页面尺寸、边距等属性来定制 PDF 外观。添加内容方面,可使用 Paragraph、List、Table 等元素将文本、列表和表格加入 PDF,图片可通过 Image 类加载插入。iTextPDF 支持多种字体和样式,可设置文本颜色、大小、样式等。此外,iTextPDF 的 TextRenderer 类能将 HTML、
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值