
(스마트인재개발원) 2nd Project: Wrapping Up the Machine Learning Work

앨런튜링_ 2021. 7. 17. 08:30
import pandas as pd
import matplotlib.pyplot as plt
from IPython.core.interactiveshell import InteractiveShell
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import RobustScaler
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score, plot_roc_curve, accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn import metrics
import xgboost as xgb

InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 1000)
pd.set_option('mode.chained_assignment', None)
1. Load the data
data = pd.read_csv('합격자 스펙 데이터 (1~100059).csv', encoding='cp949')
data1 = pd.read_csv('합격자 스펙 데이터 (100060~200126).csv', encoding='cp949')
data2 = pd.read_csv('합격자 스펙 데이터 (200127~278149).csv', encoding='cp949')
data = pd.concat([data, data1, data2])
data= data.reset_index()
2. Data preprocessing
# Clean up the application field: split into quarter and department
data['분기'] = data['지원분야'].str.split('\n').str[0]
data['부서'] = data['지원분야'].str.split('\n').str[1]
data.drop(['Unnamed: 0', '지원분야', 'index', '분기'], axis=1, inplace=True)

# Normalize the education level column
a = data['출신전공'].str.contains('4년')
b = data['출신전공'].str.contains('초대졸')
c = data['출신전공'].str.contains('해외대학')
d = data['출신전공'].str.contains('대학원')
e = data['출신전공'].str.contains('고졸')

data.loc[a, '출신전공'] = '4년'
data.loc[b, '출신전공'] = '초대졸'
data.loc[c, '출신전공'] = '해외대학'
data.loc[d, '출신전공'] = '대학원'
data.loc[e, '출신전공'] = '고졸'

# Fill missing departments with '직무무관' (no specific role)
data['부서'] = data['부서'].fillna('직무무관')

# Drop rows whose education level is high school only
data = data[data['출신전공'] != '고졸']

# Encode education level as an ordinal number
data['출신전공'].replace('해외대학', 7, inplace=True)
data['출신전공'].replace('대학원', 6, inplace=True)
data['출신전공'].replace('4년', 5, inplace=True)
data['출신전공'].replace('초대졸', 4, inplace=True)
data['출신전공'].replace('고졸', 3, inplace=True)
data['출신전공'].replace('중졸', 2, inplace=True)
data['출신전공'].replace('초졸', 1, inplace=True)


# Convert certificate counts to numbers
data['자격증'].replace('-',0, inplace=True)
data['자격증'].replace('1개',1, inplace=True)
data['자격증'].replace('2개',2, inplace=True)
data['자격증'].replace('3개',3, inplace=True)
data['자격증'].replace('4개',4, inplace=True)
data['자격증'].replace('5개',5, inplace=True)
data['자격증'].replace('6개',6, inplace=True)
data['자격증'].replace('7개',7, inplace=True)
data['자격증'].replace('8개',8, inplace=True)
data['자격증'].replace('9개',9, inplace=True)
data['자격증'].replace('10개',10, inplace=True)
data['자격증'].replace('11개',11, inplace=True)
data['자격증'].replace('12개',12, inplace=True)
data['자격증'].replace('13개',13, inplace=True)
data['자격증'].replace('14개',14, inplace=True)
data['자격증'].replace('15개',15, inplace=True)
data['자격증'].replace('16개',16, inplace=True)

# Convert campus/social/volunteer activity counts to numbers
data['교내/사회/봉사'].replace('-',0, inplace=True)
data['교내/사회/봉사'].replace('1회',1, inplace=True)
data['교내/사회/봉사'].replace('2회',2, inplace=True)
data['교내/사회/봉사'].replace('3회',3, inplace=True)
data['교내/사회/봉사'].replace('4회',4, inplace=True)
data['교내/사회/봉사'].replace('5회',5, inplace=True)
data['교내/사회/봉사'].replace('6회',6, inplace=True)
data['교내/사회/봉사'].replace('7회',7, inplace=True)
data['교내/사회/봉사'].replace('8회',8, inplace=True)
data['교내/사회/봉사'].replace('9회',9, inplace=True)
data['교내/사회/봉사'].replace('10회',10, inplace=True)
data['교내/사회/봉사'].replace('11회',11, inplace=True)
data['교내/사회/봉사'].replace('12회',12, inplace=True)
data['교내/사회/봉사'].replace('13회',13, inplace=True)
data['교내/사회/봉사'].replace('14회',14, inplace=True)

# Convert overseas experience counts to numbers
data['해외경험'].replace('-',0, inplace=True)
data['해외경험'].replace('1회',1, inplace=True)
data['해외경험'].replace('2회',2, inplace=True)
data['해외경험'].replace('3회',3, inplace=True)
data['해외경험'].replace('4회',4, inplace=True)
data['해외경험'].replace('5회',5, inplace=True)
data['해외경험'].replace('6회',6, inplace=True)
data['해외경험'].replace('11회',11, inplace=True)
data['해외경험'].replace('7회',7, inplace=True)
data['해외경험'].replace('14회',14, inplace=True)

# Convert internship counts to numbers
data['인턴'].replace('-',0, inplace=True)
data['인턴'].replace('1회',1, inplace=True)
data['인턴'].replace('2회',2, inplace=True)
data['인턴'].replace('3회',3, inplace=True)
data['인턴'].replace('4회',4, inplace=True)
data['인턴'].replace('5회',5, inplace=True)
data['인턴'].replace('6회',6, inplace=True)
data['인턴'].replace('7회',7, inplace=True)
data['인턴'].replace('8회',8, inplace=True)
data['인턴'].replace('10회',10, inplace=True)
data['인턴'].replace('12회',12, inplace=True)
data['인턴'].replace('13회',13, inplace=True)
data['인턴'].replace('14회',14, inplace=True)
data['인턴'].replace('17회',17, inplace=True)
data['인턴'].replace('20회',20, inplace=True)
data['인턴'].replace('21회',21, inplace=True)

# Convert award counts to numbers
data['수상내역'].replace('-',0, inplace=True)
data['수상내역'].replace('1회',1, inplace=True)
data['수상내역'].replace('2회',2, inplace=True)
data['수상내역'].replace('3회',3, inplace=True)
data['수상내역'].replace('4회',4, inplace=True)
data['수상내역'].replace('5회',5, inplace=True)
data['수상내역'].replace('6회',6, inplace=True)
data['수상내역'].replace('7회',7, inplace=True)
data['수상내역'].replace('9회',9, inplace=True)
data['수상내역'].replace('14회',14, inplace=True)

# Convert other foreign language counts to numbers
data['외국어(기타)'].replace('-',0, inplace=True)
data['외국어(기타)'].replace('1개',1, inplace=True)
data['외국어(기타)'].replace('2개',2, inplace=True)
data['외국어(기타)'].replace('3개',3, inplace=True)
data['외국어(기타)'].replace('4개',4, inplace=True)

# Convert TOEIC Speaking levels to numbers
data['토익스피킹'].replace('Lv8',8, inplace=True)
data['토익스피킹'].replace('Lv7',7, inplace=True)
data['토익스피킹'].replace('Lv6',6, inplace=True)
data['토익스피킹'].replace('Lv5',5, inplace=True)
data['토익스피킹'].replace('Lv4',4, inplace=True)
data['토익스피킹'].replace('Lv3',3, inplace=True)
data['토익스피킹'].replace('Lv2',2, inplace=True)
data['토익스피킹'].replace('Lv1',1, inplace=True)
data['토익스피킹'].replace('-',0, inplace=True)

# Convert OPIc grades to numbers
data['OPIC'].replace('-',0, inplace=True)
data['OPIC'].replace('AL',9, inplace=True)
data['OPIC'].replace('IH',8, inplace=True)
data['OPIC'].replace('IM3',7, inplace=True)
data['OPIC'].replace('IM2',6, inplace=True)
data['OPIC'].replace('IM1',5, inplace=True)
data['OPIC'].replace('IL',4, inplace=True)
data['OPIC'].replace('NH',3, inplace=True)
data['OPIC'].replace('NL',2, inplace=True)
data['OPIC'].replace('NM',1, inplace=True)
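
Every replace chain above follows the same pattern: '-' means none, and the other values are a number followed by '개' or '회'. For reference, a minimal reusable sketch of the same conversion, assuming the raw values really do follow that pattern (the helper name count_to_int is my own, not from the original notebook):

def count_to_int(series):
    # Hypothetical helper: '-' -> 0, 'N개'/'N회' -> N
    return (series.replace('-', '0')
                  .str.extract(r'(\d+)', expand=False)
                  .fillna(0)
                  .astype(int))

# e.g. data['자격증'] = count_to_int(data['자격증'])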

# Strip the units from TOEIC scores ('점') and GPA ('/4.5')
data['토익'] = data[data['토익'].str.contains('점')]['토익'].str.replace('점', '', regex=False)
data['학점'] = data[data['학점'].str.contains('/4.5', regex=False)]['학점'].str.replace('/4.5', '', regex=False)

# Fill missing TOEIC scores and GPAs with 0 and make both columns numeric
data['토익'] = pd.to_numeric(data['토익'], errors='coerce').fillna(0)
data['학점'] = pd.to_numeric(data['학점'], errors='coerce').fillna(0)

# Clean up the company info column (strip leftover HTML)
data['기업정보'] = data['기업정보'].str.replace('<span></span>,<span></span>','<span>기업정보없음</span>')
data['기업정보'] = data['기업정보'].str.replace('<span></span>,<span>0명 재직중</span>','<span>기업정보없음</span>')
data['기업정보'] = data['기업정보'].str.split('</span>').str[0]
data['기업정보']= data['기업정보'].str.split('>').str[1]

# Drop departments with too few applicants
list1 = ['운전 기사', '방송 케이블 프로덕션', '레저 스포츠','영화 배급','출판 인쇄 사진','디자인 CAD','사무 원무 코디'
         ,'게임','전시 공간디자인','섬유 의류 패션','의료직기타','대학교수 강사 행정직','CS관리 강의','간호사'
         ,'외국어교육','디자인기타','전시 컨벤션','DBA 데이터베이스','수출입 무역사무','환경 플랜트','연구소 R&D','토목 조경 도시 측량'
         ,'소프트웨어 하드웨어','여행 항공 숙박','연예 엔터테인먼트', '캐릭터 애니메이션','빅데이터 AI(인공지능)','이벤트 웨딩 도우미','바이오 제약 식품',
         '음악 음향','아나운서 리포터 성우','노무 헤드헌터 직업상담','공연 전시 무대 스텝','광고제작 카피 CF','단순홍보 회원관리','부동산 중개 분양 경매','전기 소방 통신 안전',
         '작가 시나리오','사회복지 요양보호 자원봉사', '광고영업','뷰티 미용 애완','회계 세무 CPA','법률 특허 상표','포장 가공','IT 디자인 컴퓨터강사','기자','배송 택배 운송',
          'HTML 퍼블리싱 UI개발','영상 사진 촬영','의사 치과 한의사','제품 산업디자인','광고 시각디자인','의류 패션 잡화디자인','통신기술 네트워크구축','감독 연출 PD','출판 편집디자인',
         '법인영업','경영분석 컨설턴트','컨텐츠 사이트운영','그래픽디자인 CG','초중고 특수학교','자격증 기술 전문교육','시스템프로그래머',
         '아웃바운드TM','QA 테스터 검증','학원상담 관리 운영','ERP 시스템분석 설계','웹디자인','채권 심사 보험 보상','학습지 과외 방문교사','설치 정비 A/S','영화 음반 배급',
        ]

for dept in list1:
    idx = data[data['부서'] == dept].index
    data = data.drop(idx)

# Keep only rows with a spec index above 150 and a concrete department
data = data[data['스펙지수'] > 150]
data = data[data['부서'] != '직무무관']
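
The department drop loop above can equivalently be written as a single boolean filter, which avoids the repeated drop calls:

# Equivalent one-liner for the drop loop above
data = data[~data['부서'].isin(list1)]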

# Companies ordered by number of accepted applicants
list_2 = data['합격회사'].value_counts().index
3. Machine learning prediction
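Each block below groups one company's departments into bands by their mean 스펙지수 and then predicts the band from the applicant features. For reference, a more compact sketch of the same labelling idea using groupby and pd.cut; the band edges are borrowed from List 1 and the column name 부서밴드 is hypothetical (the blocks below compute bands per company, not globally, and differ slightly at exact boundary values):

# Hypothetical compact version of the band labelling (global, not per company)
band_edges = [0, 150, 235, 280, 300, float('inf')]
band_labels = ['100', '150', '235', '280', '300']
dept_mean = data.groupby('부서')['스펙지수'].mean()
dept_band = pd.cut(dept_mean, bins=band_edges, labels=band_labels)
# data['부서밴드'] = data['부서'].map(dept_band)  # the band would then be the target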
# List 1
# Go through the top 20 companies by accepted applicants
for k in range(0, 20):
    data_copy = data[data['합격회사'] == list_2[k]].copy()
    list_1 = data_copy['부서'].unique()

    # One bucket per spec-index band
    list300 = []
    list280 = []
    list235 = []
    list150 = []
    list100 = []

    # Assign each department to a band by its mean spec index
    for i in range(len(list_1)):
        mean_spec = data_copy[data_copy['부서'] == list_1[i]]['스펙지수'].mean()
        if mean_spec > 300:
            list300.append(list_1[i])
        elif mean_spec > 280:
            list280.append(list_1[i])
        elif mean_spec > 235:
            list235.append(list_1[i])
        elif mean_spec > 150:
            list150.append(list_1[i])
        elif mean_spec < 150:
            list100.append(list_1[i])

    # Relabel each department with its band
    for i in range(len(list300)):
        b = data_copy['부서'].str.contains(list300[i])
        data_copy.loc[b, '부서'] = '300'

    for i in range(len(list280)):
        b = data_copy['부서'].str.contains(list280[i])
        data_copy.loc[b, '부서'] = '280'

    for i in range(len(list235)):
        b = data_copy['부서'].str.contains(list235[i])
        data_copy.loc[b, '부서'] = '235'

    for i in range(len(list150)):
        b = data_copy['부서'].str.contains(list150[i])
        data_copy.loc[b, '부서'] = '150'

    for i in range(len(list100)):
        b = data_copy['부서'].str.contains(list100[i])
        data_copy.loc[b, '부서'] = '100'

    # Features and one-hot encoded band target
    X = data_copy.iloc[:, 2:14]
    y = pd.get_dummies(data_copy['부서'])

    # Scale the features with RobustScaler
    transformer = RobustScaler()
    transformer.fit(X)
    X = transformer.transform(X)

    # Define the models
    rf = RandomForestClassifier()
    dt = DecisionTreeClassifier()
    kn = KNeighborsClassifier()

    # Split the data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, shuffle=True, random_state=8)

    print("----------------- {} ----------------------".format(list_2[k]))
    print(list_1)
    models = [rf, dt, kn]
    for model in models:
        model.fit(X_train, y_train)
        pre = model.predict(X_test)
        scores = cross_val_score(model, X_test, y_test, cv=5).mean().round(3)
        #f1score = metrics.f1_score(y_test, pre, average='micro').round(3)
        print(model, '\n', 'Accuracy:', scores, '\n')
    print("-------------------------------------------")
    
   
    
# List 2
# Go through the top 20 companies by accepted applicants
for k in range(0, 20):
    data_copy = data[data['합격회사'] == list_2[k]].copy()
    list_1 = data_copy['부서'].unique()

    # One bucket per spec-index band
    list300 = []
    list260 = []
    list235 = []
    list220 = []
    list150 = []
    list100 = []

    # Assign each department to a band by its mean spec index
    for i in range(len(list_1)):
        mean_spec = data_copy[data_copy['부서'] == list_1[i]]['스펙지수'].mean()
        if mean_spec > 300:
            list300.append(list_1[i])
        elif mean_spec > 260:
            list260.append(list_1[i])
        elif mean_spec > 235:
            list235.append(list_1[i])
        elif mean_spec > 220:
            list220.append(list_1[i])
        elif mean_spec > 150:
            list150.append(list_1[i])
        elif mean_spec < 150:
            list100.append(list_1[i])

    # Relabel each department with its band
    for i in range(len(list300)):
        b = data_copy['부서'].str.contains(list300[i])
        data_copy.loc[b, '부서'] = '300'

    for i in range(len(list260)):
        b = data_copy['부서'].str.contains(list260[i])
        data_copy.loc[b, '부서'] = '260'

    for i in range(len(list235)):
        b = data_copy['부서'].str.contains(list235[i])
        data_copy.loc[b, '부서'] = '235'

    for i in range(len(list220)):
        b = data_copy['부서'].str.contains(list220[i])
        data_copy.loc[b, '부서'] = '220'

    for i in range(len(list150)):
        b = data_copy['부서'].str.contains(list150[i])
        data_copy.loc[b, '부서'] = '150'

    for i in range(len(list100)):
        b = data_copy['부서'].str.contains(list100[i])
        data_copy.loc[b, '부서'] = '100'

    # Features and one-hot encoded band target
    X = data_copy.iloc[:, 2:14]
    y = pd.get_dummies(data_copy['부서'])

    # Scale the features with RobustScaler
    transformer = RobustScaler()
    transformer.fit(X)
    X = transformer.transform(X)

    # Define the models
    rf = RandomForestClassifier()
    dt = DecisionTreeClassifier()
    kn = KNeighborsClassifier()

    # Split the data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=9)

    print("----------------- {} ----------------------".format(list_2[k]))
    print(list_1)
    models = [rf, dt, kn]
    for model in models:
        model.fit(X_train, y_train)
        pre = model.predict(X_test)
        scores = cross_val_score(model, X_test, y_test, cv=5).mean().round(3)
        #f1score = metrics.f1_score(y_test, pre, average='micro').round(3)
        print(model, '\n', 'Accuracy:', scores, '\n')
    print("-------------------------------------------")
    
   
    
# List 3
# Go through the top 20 companies by accepted applicants
for k in range(0, 20):
    data_copy = data[data['합격회사'] == list_2[k]].copy()
    list_1 = data_copy['부서'].unique()

    # One bucket per spec-index band
    list300 = []
    list260 = []
    list200 = []
    list150 = []
    list100 = []

    # Assign each department to a band by its mean spec index
    for i in range(len(list_1)):
        mean_spec = data_copy[data_copy['부서'] == list_1[i]]['스펙지수'].mean()
        if mean_spec > 300:
            list300.append(list_1[i])
        elif mean_spec > 260:
            list260.append(list_1[i])
        elif mean_spec > 200:
            list200.append(list_1[i])
        elif mean_spec > 150:
            list150.append(list_1[i])
        elif mean_spec < 150:
            list100.append(list_1[i])

    # Relabel each department with its band
    for i in range(len(list300)):
        b = data_copy['부서'].str.contains(list300[i])
        data_copy.loc[b, '부서'] = '300'

    for i in range(len(list260)):
        b = data_copy['부서'].str.contains(list260[i])
        data_copy.loc[b, '부서'] = '260'

    for i in range(len(list200)):
        b = data_copy['부서'].str.contains(list200[i])
        data_copy.loc[b, '부서'] = '200'

    for i in range(len(list150)):
        b = data_copy['부서'].str.contains(list150[i])
        data_copy.loc[b, '부서'] = '150'

    for i in range(len(list100)):
        b = data_copy['부서'].str.contains(list100[i])
        data_copy.loc[b, '부서'] = '100'

    # Features and one-hot encoded band target
    X = data_copy.iloc[:, 2:14]
    y = pd.get_dummies(data_copy['부서'])

    # Scale the features with RobustScaler
    transformer = RobustScaler()
    transformer.fit(X)
    X = transformer.transform(X)

    # Define the models
    rf = RandomForestClassifier()
    dt = DecisionTreeClassifier()
    kn = KNeighborsClassifier()

    # Split the data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=6)

    print("----------------- {} ----------------------".format(list_2[k]))
    print(list_1)
    models = [rf, dt, kn]
    for model in models:
        model.fit(X_train, y_train)
        pre = model.predict(X_test)
        scores = cross_val_score(model, X_test, y_test, cv=5).mean().round(3)
        #f1score = metrics.f1_score(y_test, pre, average='micro').round(3)
        print(model, '\n', 'Accuracy:', scores, '\n')
    print("-------------------------------------------")
    
   
list_2[0:20]
# List 4
# Go through the top 20 companies by accepted applicants
for k in range(0, 20):
    data_copy = data[data['합격회사'] == list_2[k]].copy()
    list_1 = data_copy['부서'].unique()

    # One bucket per spec-index band
    list300 = []
    list260 = []
    list230 = []
    list150 = []
    list100 = []

    # Assign each department to a band by its mean spec index
    for i in range(len(list_1)):
        mean_spec = data_copy[data_copy['부서'] == list_1[i]]['스펙지수'].mean()
        if mean_spec > 300:
            list300.append(list_1[i])
        elif mean_spec > 260:
            list260.append(list_1[i])
        elif mean_spec > 230:
            list230.append(list_1[i])
        elif mean_spec > 150:
            list150.append(list_1[i])
        elif mean_spec < 150:
            list100.append(list_1[i])

    # Relabel each department with its band
    for i in range(len(list300)):
        b = data_copy['부서'].str.contains(list300[i])
        data_copy.loc[b, '부서'] = '300'

    for i in range(len(list260)):
        b = data_copy['부서'].str.contains(list260[i])
        data_copy.loc[b, '부서'] = '260'

    for i in range(len(list230)):
        b = data_copy['부서'].str.contains(list230[i])
        data_copy.loc[b, '부서'] = '230'

    for i in range(len(list150)):
        b = data_copy['부서'].str.contains(list150[i])
        data_copy.loc[b, '부서'] = '150'

    for i in range(len(list100)):
        b = data_copy['부서'].str.contains(list100[i])
        data_copy.loc[b, '부서'] = '100'

    # Features and one-hot encoded band target
    X = data_copy.iloc[:, 2:14]
    y = pd.get_dummies(data_copy['부서'])

    # Scale the features with RobustScaler
    transformer = RobustScaler()
    transformer.fit(X)
    X = transformer.transform(X)

    # Define the models
    rf = RandomForestClassifier()
    dt = DecisionTreeClassifier()
    kn = KNeighborsClassifier()

    # Split the data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=16)

    print("----------------- {} ----------------------".format(list_2[k]))
    print(list_1)
    models = [rf, dt, kn]
    for model in models:
        model.fit(X_train, y_train)
        pre = model.predict(X_test)
        scores = cross_val_score(model, X_test, y_test, cv=5).mean().round(3)
        #f1score = metrics.f1_score(y_test, pre, average='micro').round(3)
        print(model, '\n', 'Accuracy:', scores, '\n')
    print("-------------------------------------------")
    
   
# List 5
# Go through the top 20 companies by accepted applicants
for k in range(0, 20):
    data_copy = data[data['합격회사'] == list_2[k]].copy()
    list_1 = data_copy['부서'].unique()

    # One bucket per spec-index band
    list300 = []
    list270 = []
    list220 = []
    list150 = []
    list100 = []

    # Assign each department to a band by its mean spec index
    for i in range(len(list_1)):
        mean_spec = data_copy[data_copy['부서'] == list_1[i]]['스펙지수'].mean()
        if mean_spec > 300:
            list300.append(list_1[i])
        elif mean_spec > 270:
            list270.append(list_1[i])
        elif mean_spec > 220:
            list220.append(list_1[i])
        elif mean_spec > 150:
            list150.append(list_1[i])
        elif mean_spec < 150:
            list100.append(list_1[i])

    # Relabel each department with its band
    for i in range(len(list300)):
        b = data_copy['부서'].str.contains(list300[i])
        data_copy.loc[b, '부서'] = '300'

    for i in range(len(list270)):
        b = data_copy['부서'].str.contains(list270[i])
        data_copy.loc[b, '부서'] = '270'

    for i in range(len(list220)):
        b = data_copy['부서'].str.contains(list220[i])
        data_copy.loc[b, '부서'] = '220'

    for i in range(len(list150)):
        b = data_copy['부서'].str.contains(list150[i])
        data_copy.loc[b, '부서'] = '150'

    for i in range(len(list100)):
        b = data_copy['부서'].str.contains(list100[i])
        data_copy.loc[b, '부서'] = '100'

    # Features and one-hot encoded band target
    X = data_copy.iloc[:, 2:14]
    y = pd.get_dummies(data_copy['부서'])

    # Scale the features with RobustScaler
    transformer = RobustScaler()
    transformer.fit(X)
    X = transformer.transform(X)

    # Define the models
    rf = RandomForestClassifier()
    dt = DecisionTreeClassifier()
    kn = KNeighborsClassifier()

    # Split the data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=8)

    print("----------------- {} ----------------------".format(list_2[k]))
    print(list_1)
    models = [rf, dt, kn]
    for model in models:
        model.fit(X_train, y_train)
        pre = model.predict(X_test)
        scores = cross_val_score(model, X_test, y_test, cv=5).mean().round(3)
        #f1score = metrics.f1_score(y_test, pre, average='micro').round(3)
        print(model, '\n', 'Accuracy:', scores, '\n')
    print("-------------------------------------------")