Table type usage sample:

This post walks through initializing and assigning a nested table type variable using a concrete PL/SQL example. It shows how to declare a variable of the table type, extend it, and assign values to its elements. It also looks at direct assignment between table variables and shows that the source and target tables occupy different memory addresses.

1, Create the nested table type:

create or replace type ename_tab as table of varchar2(30);

2, Declare two variables of the type, initialize and extend one of them, then assign it to the other:

set serveroutput on
declare
  p1 ename_tab;
  p2 ename_tab;
begin
  -- p1 is initialized with the constructor (one element); p2 is left uninitialized
  p1 := ename_tab('');
  -- p2 := ename_tab('');

  -- extend p1 by two more elements, so it now holds three
  p1.extend(2);
  -- p2.extend(1);

  p1(1) := 'Jack';
  p1(2) := 'Mikel';
  p1(3) := 'Dameon';

  -- direct assignment copies the whole collection into p2
  p2 := p1;

  -- modifying the copy does not touch the source
  p2(1) := 'Jack--1';
  p2(2) := 'Mikel--2';
  p2(3) := 'Dameon--3';

  dbms_output.put_line(p1(1));
  dbms_output.put_line(p1(2));
  dbms_output.put_line(p1(3));
end;
/
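Running the block with server output enabled should print p1's original values, because the later changes made through p2 affect only the copy:

Jack
Mikel
Dameon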
3, Conclusions:

1, A table type variable must be initialized with its constructor before it can be used, e.g. p1 := ename_tab(''); see the sketch after this list for what happens when it is not.

2, A table variable can be assigned directly, just like a scalar variable, and the target of the assignment does not need to be initialized beforehand (p2 above is never initialized before p2 := p1).

3, After the assignment, the target table variable and the source table variable occupy different memory addresses: the assignment copies the collection, so changes to p2 do not show up in p1.
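A minimal sketch of conclusion 1, reusing the ename_tab type created above (the variable name p3 and the message text are illustrative, not from the original post): extending or referencing a collection variable that has never been initialized raises ORA-06531.

declare
  p3 ename_tab;  -- declared but never initialized with ename_tab(...)
begin
  p3.extend(1);  -- raises ORA-06531 here
  p3(1) := 'Jack';
exception
  when collection_is_null then
    -- COLLECTION_IS_NULL is the predefined exception for ORA-06531
    dbms_output.put_line('collection must be initialized first: ' || sqlerrm);
end;
/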


Source: http://blog.itpub.net/350519/viewspace-1038604/

后日(T+2)9:35收盘价比次日(T+1)收盘价(15:00)大1% """ print("开始创建标签...") self.print_memory_usage("创建标签前") if self.data is None: raise ValueError("请先加载数据") # 按股票和时间排序 self.data = self.data.sort_values([&#39;stock_code&#39;, &#39;datetime&#39;]) # 添加日期列用于合并 self.data[&#39;date&#39;] = self.data[&#39;datetime&#39;].dt.date # 创建每日关键时间点价格数据 daily_key_points = self.data.groupby([&#39;stock_code&#39;, &#39;date&#39;]).apply( lambda x: pd.Series({ &#39;time9_35_close&#39;: x[x[&#39;datetime&#39;].dt.time == pd.to_datetime(&#39;09:35:00&#39;).time()][&#39;close&#39;].iloc[0] if not x[x[&#39;datetime&#39;].dt.time == pd.to_datetime(&#39;09:35:00&#39;).time()].empty else np.nan, &#39;time15_00_close&#39;: x[x[&#39;datetime&#39;].dt.time == pd.to_datetime(&#39;15:00:00&#39;).time()][&#39;close&#39;].iloc[0] if not x[x[&#39;datetime&#39;].dt.time == pd.to_datetime(&#39;15:00:00&#39;).time()].empty else np.nan }) ).reset_index() # 为每日关键点添加次日(T+1)和后日(T+2)数据 daily_key_points = daily_key_points.sort_values([&#39;stock_code&#39;, &#39;date&#39;]) daily_key_points[&#39;next_date&#39;] = daily_key_points.groupby(&#39;stock_code&#39;)[&#39;date&#39;].shift(-1) daily_key_points[&#39;next_next_date&#39;] = daily_key_points.groupby(&#39;stock_code&#39;)[&#39;date&#39;].shift(-2) # 合并次日(T+1)数据 daily_key_points = pd.merge( daily_key_points, daily_key_points[[&#39;stock_code&#39;, &#39;date&#39;, &#39;time9_35_close&#39;, &#39;time15_00_close&#39;]].rename( columns={ &#39;date&#39;: &#39;next_date&#39;, &#39;time9_35_close&#39;: &#39;next_time9_35_close&#39;, &#39;time15_00_close&#39;: &#39;next_time15_00_close&#39; } ), on=[&#39;stock_code&#39;, &#39;next_date&#39;], how=&#39;left&#39; ) # 合并后日(T+2)数据 daily_key_points = pd.merge( daily_key_points, daily_key_points[[&#39;stock_code&#39;, &#39;date&#39;, &#39;time9_35_close&#39;]].rename( columns={ &#39;date&#39;: &#39;next_next_date&#39;, &#39;time9_35_close&#39;: &#39;next_next_time9_35_close&#39; } ), on=[&#39;stock_code&#39;, &#39;next_next_date&#39;], how=&#39;left&#39; ) # 将关键点数据合并回原始数据 self.data = pd.merge( self.data, daily_key_points[[&#39;stock_code&#39;, &#39;date&#39;, &#39;next_time9_35_close&#39;, &#39;next_time15_00_close&#39;, &#39;next_next_time9_35_close&#39;]], on=[&#39;stock_code&#39;, &#39;date&#39;], how=&#39;left&#39; ) # 计算新条件 cond1 = (self.data[&#39;next_time15_00_close&#39;] > self.data[&#39;next_time9_35_close&#39;] * 1.05) cond2 = (self.data[&#39;next_next_time9_35_close&#39;] > self.data[&#39;next_time15_00_close&#39;] * 1.01) # 创建标签(满足两个条件则为1) self.data[&#39;label&#39;] = np.where(cond1 & cond2, 1, 0).astype(np.int8) # 删除中间列 self.data.drop([ &#39;date&#39;, &#39;next_time9_35_close&#39;, &#39;next_time15_00_close&#39;, &#39;next_next_time9_35_close&#39; ], axis=1, inplace=True, errors=&#39;ignore&#39;) # 保存标签 self.labels = self.data[&#39;label&#39;] # 分析标签分布 label_counts = self.data[&#39;label&#39;].value_counts(normalize=True) print(f"标签分布:\n{label_counts}") print("标签创建完成") self.print_memory_usage("创建标签后") def perform_clustering(self, n_clusters=5, batch_size=100000): """ 执行聚类分析 - 使用MiniBatchKMeans处理大数据 :param n_clusters: 聚类数量 :param batch_size: 每次处理的样本数量 """ print(f"开始聚类分析,聚类数: {n_clusters}...") self.print_memory_usage("聚类前") if self.feature_cols is None: raise ValueError("请先创建特征") # 添加数据清洗步骤 self.clean_data() # 标准化特征 print("标准化特征...") self.scaler.fit(self.data[self.feature_cols]) # 使用MiniBatchKMeans进行聚类 self.cluster_model = MiniBatchKMeans( n_clusters=n_clusters, batch_size=batch_size, random_state=42, n_init=3 ) # 分批处理数据 print("分批聚类...") n_samples 
= len(self.data) for i in tqdm(range(0, n_samples, batch_size), desc="聚类进度"): batch_data = self.data.iloc[i:i+batch_size] scaled_batch = self.scaler.transform(batch_data[self.feature_cols]) self.cluster_model.partial_fit(scaled_batch) # 获取最终聚类结果 print("获取聚类结果...") clusters = [] for i in tqdm(range(0, n_samples, batch_size), desc="分配聚类"): batch_data = self.data.iloc[i:i+batch_size] scaled_batch = self.scaler.transform(batch_data[self.feature_cols]) batch_clusters = self.cluster_model.predict(scaled_batch) clusters.append(batch_clusters) # 添加聚类结果到数据 self.data[&#39;cluster&#39;] = np.concatenate(clusters) self.feature_cols.append(&#39;cluster&#39;) # 分析聚类结果 cluster_summary = self.data.groupby(&#39;cluster&#39;)[&#39;label&#39;].agg([&#39;mean&#39;, &#39;count&#39;]) print("聚类结果分析:") print(cluster_summary) # 保存聚类模型 cluster_model_path = os.path.join( self.output_path, "分钟线预测训练聚类模型.pkl" ) joblib.dump(self.cluster_model, cluster_model_path) print(f"聚类模型已保存至: {cluster_model_path}") self.print_memory_usage("聚类后") def train_prediction_model(self, sample_fraction=0.1): """ 训练预测模型 - 使用数据抽样减少内存使用 :param sample_fraction: 抽样比例 """ print("开始训练预测模型...") self.print_memory_usage("训练模型前") if self.feature_cols is None or self.labels is None: raise ValueError("请先创建特征和标签") # 抽样数据 if sample_fraction < 1.0: print(f"抽样 {sample_fraction*100:.1f}% 数据用于训练") sample_data = self.data.sample(frac=sample_fraction, random_state=42) X = sample_data[self.feature_cols] y = sample_data[&#39;label&#39;] else: X = self.data[self.feature_cols] y = self.labels # 检查类别分布 if y.nunique() < 2: print("警告: 只有一个类别的数据,无法训练模型") return # 划分训练集和测试集 X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) # 训练随机森林分类器 self.prediction_model = RandomForestClassifier( n_estimators=100, # 减少树的数量 max_depth=8, # 减小最大深度 min_samples_split=10, class_weight=&#39;balanced&#39;, random_state=42, n_jobs=-1 ) self.prediction_model.fit(X_train, y_train) # 评估模型 y_pred = self.prediction_model.predict(X_test) print("模型评估报告:") print(classification_report(y_test, y_pred)) # 打印混淆矩阵 cm = confusion_matrix(y_test, y_pred) print("混淆矩阵:") print(cm) # 保存预测模型 model_path = os.path.join( self.output_path, "分钟线预测训练模型.pkl" ) joblib.dump(self.prediction_model, model_path) print(f"预测模型已保存至: {model_path}") self.print_memory_usage("训练模型后") def predict_and_save(self, output_results=True): """ 使用模型进行预测并保存结果 :param output_results: 是否输出预测结果 """ print("开始预测...") self.print_memory_usage("预测前") if self.prediction_model is None: raise ValueError("请先训练预测模型") # 准备预测数据 X = self.data[self.feature_cols] # 分批预测 predictions = [] batch_size = 10000 n_samples = len(X) for i in tqdm(range(0, n_samples, batch_size), desc="预测进度"): batch_data = X.iloc[i:i+batch_size] batch_pred = self.prediction_model.predict(batch_data) predictions.append(batch_pred) # 合并预测结果 self.data[&#39;prediction&#39;] = np.concatenate(predictions) # 保存预测结果 if output_results: output_file = os.path.join(self.output_path, "预测结果.csv") self.data[[&#39;stock_code&#39;, &#39;datetime&#39;, &#39;close&#39;, &#39;label&#39;, &#39;prediction&#39;]].to_csv(output_file, index=False) print(f"预测结果已保存至: {output_file}") # 分析预测效果 accuracy = (self.data[&#39;label&#39;] == self.data[&#39;prediction&#39;]).mean() print(f"整体预测准确率: {accuracy:.4f}") # 按股票分析预测效果 stock_accuracy = self.data.groupby(&#39;stock_code&#39;).apply( lambda x: (x[&#39;label&#39;] == x[&#39;prediction&#39;]).mean() ) print("\n股票预测准确率统计:") print(stock_accuracy.describe()) self.print_memory_usage("预测后") def run(self, 
output_results=True, sample_fraction=0.1): """ 运行整个流程 - 使用内存优化技术 """ try: # 分步执行,每步完成后释放内存 self.load_and_preprocess_data() gc.collect() self.print_memory_usage("数据加载后") self.create_features() gc.collect() self.print_memory_usage("特征创建后") self.create_labels() # 使用新的标签创建方法 gc.collect() self.print_memory_usage("标签创建后") self.perform_clustering(n_clusters=self.config.get(&#39;n_clusters&#39;, 5)) gc.collect() self.print_memory_usage("聚类后") self.train_prediction_model(sample_fraction=sample_fraction) gc.collect() self.print_memory_usage("模型训练后") self.predict_and_save(output_results) gc.collect() self.print_memory_usage("预测后") print("训练和预测流程完成!") except KeyboardInterrupt: print("用户中断执行") except Exception as e: print(f"运行过程中出错: {str(e)}") import traceback traceback.print_exc() # 配置参数 config = { # 数据路径配置 &#39;five_min_sz_path&#39;: r"D:\股票量化数据库\股票五分钟线csv数据\深证", &#39;five_min_sh_path&#39;: r"D:\股票量化数据库\股票五分钟线csv数据\上证", &#39;daily_sz_path&#39;: r"D:\股票量化数据库\股票csv数据\深证", &#39;daily_sh_path&#39;: r"D:\股票量化数据库\股票csv数据\上证", # 输出路径 &#39;output_path&#39;: r"D:\股票量化数据库\预测结果", # 时间范围配置 &#39;start_date&#39;: &#39;2023-09-08&#39;, &#39;end_date&#39;: &#39;2025-08-07&#39;, # 聚类配置 &#39;n_clusters&#39;: 5 } # 创建并运行系统 if __name__ == "__main__": # 打印环境信息 print(f"Python版本: {sys.version}") print(f"Pandas版本: {pd.__version__}") # 是否输出预测结果 output_results = True # 抽样比例 (0.1 = 10%) sample_fraction = 0.1 # 设置Pandas内存选项 pd.set_option(&#39;mode.chained_assignment&#39;, None) pd.set_option(&#39;display.max_columns&#39;, None) # 设置内存优化选项 pd.set_option(&#39;compute.use_numexpr&#39;, True) pd.set_option(&#39;compute.use_bottleneck&#39;, True) # 创建并运行系统 system = StockPredictionSystem(config) system.run(output_results=output_results, sample_fraction=sample_fraction)
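A note on the incremental merge step: the merged file is grown by keeping a single pyarrow.parquet.ParquetWriter open and calling write_table once per batch, so each batch lands in the output as an additional row group; re-creating the writer on every batch would truncate the file and keep only the last batch. Below is a minimal standalone sketch of that pattern; the file names part_*.parquet and combined.parquet are placeholders, and the input files are assumed to share one schema.

import pyarrow.parquet as pq

# Hypothetical input files that already share the same schema.
part_files = ["part_0.parquet", "part_1.parquet", "part_2.parquet"]

writer = None
try:
    for path in part_files:
        table = pq.read_table(path)
        if writer is None:
            # The first table's schema defines the schema of the output file.
            writer = pq.ParquetWriter("combined.parquet", table.schema)
        writer.write_table(table)  # each call appends a new row group
finally:
    if writer is not None:
        writer.close()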
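The two conditions behind create_labels are easiest to check with concrete numbers. The sketch below uses made-up prices (not taken from any dataset) and applies the same 5% and 1% thresholds used in the class:

# Hypothetical prices, for illustration only.
t1_0935_close = 10.00   # T+1 close of the 09:35 bar
t1_1500_close = 10.60   # T+1 close of the 15:00 bar  (+6.0% vs the 09:35 close)
t2_0935_close = 10.75   # T+2 close of the 09:35 bar  (+1.4% vs the T+1 15:00 close)

cond1 = t1_1500_close > t1_0935_close * 1.05   # +6.0% > +5%  -> True
cond2 = t2_0935_close > t1_1500_close * 1.01   # +1.4% > +1%  -> True
label = int(cond1 and cond2)                   # label = 1

Because the key prices are merged back on (stock_code, date), every five-minute bar of day T for that stock receives this same label.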