Justin-J committed
Commit 855db18 · 1 Parent(s): 57e541a

Added my Project Files, Deployed my application

Churn.jpeg ADDED
NotChurn.png ADDED
app.py ADDED
@@ -0,0 +1,324 @@
+ import streamlit as st
+ import joblib
+ import pandas as pd
+ import numpy as np
+ import plotly.graph_objects as go
+ from PIL import Image
+ import time
+ import matplotlib.pyplot as plt
+ from io import BytesIO
+
+
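+ # Load the preprocessing artifacts and the trained model fitted during training
+ # (the .joblib files added in this commit are tracked with Git LFS)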
+ num_imputer = joblib.load('numerical_imputer.joblib')
+ cat_imputer = joblib.load('cat_imputer.joblib')
+ encoder = joblib.load('encoder.joblib')
+ scaler = joblib.load('scaler.joblib')
+ lr_model = joblib.load('lr_smote_model.joblib')
+
+
+ def preprocess_input(input_data):
+     input_df = pd.DataFrame(input_data, index=[0])
+
+     cat_columns = [col for col in input_df.columns if input_df[col].dtype == 'object']
+     num_columns = [col for col in input_df.columns if input_df[col].dtype != 'object']
+
+     input_df_imputed_cat = cat_imputer.transform(input_df[cat_columns])
+     input_df_imputed_num = num_imputer.transform(input_df[num_columns])
+
+     input_encoded_df = pd.DataFrame(encoder.transform(input_df_imputed_cat).toarray(),
+                                     columns=encoder.get_feature_names_out(cat_columns))
+
+     input_df_scaled = scaler.transform(input_df_imputed_num)
+     input_scaled_df = pd.DataFrame(input_df_scaled, columns=num_columns)
+     final_df = pd.concat([input_encoded_df, input_scaled_df], axis=1)
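+     # Align the columns with the feature order used at training time;
+     # any dummy column missing from this single-row frame is filled with 0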
+     final_df = final_df.reindex(columns=original_feature_names, fill_value=0)
+
+     return final_df
+
+
+ original_feature_names = ['MONTANT', 'FREQUENCE_RECH', 'REVENUE', 'ARPU_SEGMENT', 'FREQUENCE',
+                           'DATA_VOLUME', 'ON_NET', 'ORANGE', 'TIGO', 'ZONE1', 'ZONE2', 'REGULARITY', 'FREQ_TOP_PACK',
+                           'REGION_DAKAR', 'REGION_DIOURBEL', 'REGION_FATICK', 'REGION_KAFFRINE', 'REGION_KAOLACK',
+                           'REGION_KEDOUGOU', 'REGION_KOLDA', 'REGION_LOUGA', 'REGION_MATAM', 'REGION_SAINT-LOUIS',
+                           'REGION_SEDHIOU', 'REGION_TAMBACOUNDA', 'REGION_THIES', 'REGION_ZIGUINCHOR',
+                           'TENURE_Long-term', 'TENURE_Medium-term', 'TENURE_Mid-term', 'TENURE_Short-term',
+                           'TENURE_Very short-term', 'TOP_PACK_VAS', 'TOP_PACK_data', 'TOP_PACK_international',
+                           'TOP_PACK_messaging', 'TOP_PACK_other_services', 'TOP_PACK_social_media',
+                           'TOP_PACK_voice']
+
+ # Set up the Streamlit app
+ st.set_page_config(layout="wide")
+
+ # Main page - Churn Prediction
+ st.title('CUSTOMER CHURN PREDICTION APP (CCPA)')
+
+ # Main page - introduction
+ st.markdown("Churn is one of the biggest problems in the telecom industry. Research has shown that the average monthly churn rate among the top 4 wireless carriers in the US is 1.9% - 2%.")
+ st.image("bg.png", use_column_width=True)
+
+ # Sidebar - welcome image
+ st.sidebar.image("welcome.png", use_column_width=True)
+ # st.sidebar.title("ENTER THE DETAILS OF THE CUSTOMER HERE")
+
+ # Define a dictionary of models with their names, actual models, and types
+ models = {
+     'Logistic Regression': {'model': lr_model, 'type': 'logistic_regression'},
+     # 'ComplementNB': {'model': cnb_model, 'type': 'ComplementNB'}
+ }
+
+ # Allow the user to select a model from the sidebar
+ model_name = st.sidebar.selectbox('Select a model', list(models.keys()))
+
+ # Retrieve the selected model and its type from the dictionary
+ model = models[model_name]['model']
+ model_type = models[model_name]['type']
+
+
+ # Collect input from the user
+ st.sidebar.title('ENTER CUSTOMER DETAILS')
+ input_features = {
+     'MONTANT': st.sidebar.number_input('Top-up Amount (MONTANT)'),
+     'FREQUENCE_RECH': st.sidebar.number_input('No. of Times the Customer Refilled (FREQUENCE_RECH)'),
+     'REVENUE': st.sidebar.number_input('Monthly income of the client (REVENUE)'),
+     'ARPU_SEGMENT': st.sidebar.number_input('Income over 90 days / 3 (ARPU_SEGMENT)'),
+     'FREQUENCE': st.sidebar.number_input('Number of times the client has made an income (FREQUENCE)'),
+     'DATA_VOLUME': st.sidebar.number_input('Number of Connections (DATA_VOLUME)'),
+     'ON_NET': st.sidebar.number_input('Inter Expresso Call (ON_NET)'),
+     'ORANGE': st.sidebar.number_input('Call to Orange (ORANGE)'),
+     'TIGO': st.sidebar.number_input('Call to Tigo (TIGO)'),
+     'ZONE1': st.sidebar.number_input('Call to Zone 1 (ZONE1)'),
+     'ZONE2': st.sidebar.number_input('Call to Zone 2 (ZONE2)'),
+     'REGULARITY': st.sidebar.number_input('Number of Times the Client is Active for 90 Days (REGULARITY)'),
+     'FREQ_TOP_PACK': st.sidebar.number_input('Number of Times the Client has Activated the Top Packs (FREQ_TOP_PACK)'),
+     'REGION': st.sidebar.selectbox('Location of Each Client (REGION)', ['DAKAR', 'DIOURBEL', 'FATICK', 'KAFFRINE', 'KAOLACK',
+                                                                         'KEDOUGOU', 'KOLDA', 'LOUGA', 'MATAM', 'SAINT-LOUIS',
+                                                                         'SEDHIOU', 'TAMBACOUNDA', 'THIES', 'ZIGUINCHOR']),
+
+     'TENURE': st.sidebar.selectbox('Duration in the Network (TENURE)', ['Long-term', 'Medium-term', 'Mid-term', 'Short-term',
+                                                                         'Very short-term']),
+     'TOP_PACK': st.sidebar.selectbox('Most Active Pack (TOP_PACK)', ['VAS', 'data', 'international',
+                                                                      'messaging', 'other_services', 'social_media',
+                                                                      'voice'])
+
+ }
+
+ # Input validation
+ valid_input = True
+ error_messages = []
+
+ # Validate numeric inputs
+ numeric_ranges = {
+     'MONTANT': [0, 1000000],
+     'FREQUENCE_RECH': [0, 100],
+     'REVENUE': [0, 1000000],
+     'ARPU_SEGMENT': [0, 100000],
+     'FREQUENCE': [0, 100],
+     'DATA_VOLUME': [0, 100000],
+     'ON_NET': [0, 100000],
+     'ORANGE': [0, 100000],
+     'TIGO': [0, 100000],
+     'ZONE1': [0, 100000],
+     'ZONE2': [0, 100000],
+     'REGULARITY': [0, 100],
+     'FREQ_TOP_PACK': [0, 100]
+ }
+
+ for feature, value in input_features.items():
+     range_min, range_max = numeric_ranges.get(feature, [None, None])
+     if range_min is not None and range_max is not None:
+         if not range_min <= value <= range_max:
+             valid_input = False
+             error_messages.append(f"{feature} should be between {range_min} and {range_max}.")
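+ # NOTE: valid_input and error_messages are not referenced anywhere else in this file;
+ # one minimal way to surface the validation results to the user would be:
+ if not valid_input:
+     for msg in error_messages:
+         st.sidebar.error(msg)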
+
+ # Churn Prediction
+
+ def predict_churn(input_data, model):
+     # Preprocess the input data
+     preprocessed_data = preprocess_input(input_data)
+
+     # Calculate churn probabilities using the model
+     probabilities = model.predict_proba(preprocessed_data)
+
+     # Determine churn labels based on the model type
+     if model_type == "logistic_regression":
+         churn_labels = ["No Churn", "Churn"]
+     # elif model_type == "ComplementNB":
+     #     churn_labels = ["Churn", "No Churn"]
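+     # scikit-learn orders predict_proba columns by model.classes_, so with the usual 0/1 target
+     # encoding index 0 corresponds to "No Churn" and index 1 to "Churn" (assumed here)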
+     # Extract churn probabilities for the first (and only) sample
+     churn_probability = probabilities[0]
+
+     # Create a dictionary mapping churn labels to their indices
+     churn_indices = {label: idx for idx, label in enumerate(churn_labels)}
+
+     # Determine the index with the highest churn probability
+     churn_index = np.argmax(churn_probability)
+
+     # Return churn labels, churn probabilities, churn indices, and churn index
+     return churn_labels, churn_probability, churn_indices, churn_index
+
+ # Predict churn based on user input
+ if st.sidebar.button('Predict Churn'):
+     try:
+         with st.spinner("Wait, Results loading..."):
+             # Simulate a long-running process
+             progress_bar = st.progress(0)
+             step = 20  # A big step will reduce the execution time
+             for i in range(0, 100, step):
+                 time.sleep(0.1)
+                 progress_bar.progress(i + step)
+
+             # churn_labels, churn_probability = predict_churn(input_features, model)  # Pass model1 or model2 based on the selected model
+             churn_labels, churn_probability, churn_indices, churn_index = predict_churn(input_features, model)
+
+             st.subheader('CHURN PREDICTION RESULTS')
+
+
+
+             col1, col2 = st.columns(2)
+
+             if churn_labels[churn_index] == "Churn":
+                 churn_prob = churn_probability[churn_index]
+                 with col1:
+                     st.error(f"DANGER! This customer is likely to churn with a probability of {churn_prob * 100:.2f}% 😢")
+                     resized_churn_image = Image.open('Churn.jpeg')
+                     resized_churn_image = resized_churn_image.resize((350, 300))  # Adjust the width and height as desired
+                     st.image(resized_churn_image)
+                 # Add suggestions for retaining churned customers in the 'Churn' group
+                 with col2:
+                     st.info("ADVICE TO EXPRESSO MANAGEMENT:\n"
+                             "- Identify Reasons for Churn\n"
+                             "- Offer Incentives\n"
+                             "- Showcase Improvements\n"
+                             "- Gather Feedback\n"
+                             "- Customer Surveys\n"
+                             "- Personalized Recommendations\n"
+                             "- Reestablish Trust\n"
+                             "- Follow-Up Communication\n"
+                             "- Reactivation Campaigns\n"
+                             "- Improve product or service offerings based on customer feedback\n"
+                             "SUMMARY NOTE\n"
+                             "- Remember that winning back churning customers takes time and persistence.\n"
+                             "- It's crucial to genuinely address their concerns and provide value to rebuild their trust in your business.\n"
+                             "- Regularly evaluate the effectiveness of your strategies and adjust them as needed based on customer responses and feedback.\n")
+             else:
+                 churn_prob = churn_probability[churn_index]
+                 with col1:
+                     st.success(f"This customer is likely to stay (not churn) with a probability of {churn_prob * 100:.2f}% 😀")
+                     resized_not_churn_image = Image.open('NotChurn.png')
+                     resized_not_churn_image = resized_not_churn_image.resize((350, 300))  # Adjust the width and height as desired
+                     st.image(resized_not_churn_image)
+                 # Add suggestions for keeping the loyal customers in the 'No Churn' group
+                 with col2:
+                     st.info("ADVICE TO EXPRESSO MANAGEMENT\n"
+                             "- Quality Products/Services\n"
+                             "- Personalized Experience\n"
+                             "- Loyalty Programs\n"
+                             "- Excellent Customer Service\n"
+                             "- Exclusive Content\n"
+                             "- Early Access\n"
+                             "- Personal Thank-You Notes\n"
+                             "- Surprise Gifts or Discounts\n"
+                             "- Feedback Opportunities\n"
+                             "- Community Engagement\n"
+                             "- Anniversary Celebrations\n"
+                             "- Refer-a-Friend Programs\n"
+                             "SUMMARY NOTE\n"
+                             "- Remember that the key to building lasting loyalty is consistency.\n"
+                             "- Continuously demonstrate your commitment to meeting customers' needs and enhancing their experience.\n"
+                             "- Regularly assess the effectiveness of your loyalty initiatives and adapt them based on customer feedback and preferences.")
+
+             st.subheader('Churn Probability')
+
+             # Create a donut chart to display probabilities
+             fig = go.Figure(data=[go.Pie(
+                 labels=churn_labels,
+                 values=churn_probability,
+                 hole=0.5,
+                 textinfo='label+percent',
+                 marker=dict(colors=['#FFA07A', '#6495ED', '#FFD700', '#32CD32', '#FF69B4', '#8B008B']))])
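+             # Only the first two colors are actually used, since the pie has just two slices (No Churn / Churn)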
+
+             fig.update_traces(
+                 hoverinfo='label+percent',
+                 textfont_size=12,
+                 textposition='inside',
+                 texttemplate='%{label}: %{percent:.2%}'
+             )
+
+             fig.update_layout(
+                 title='Churn Probability',
+                 title_x=0.5,
+                 showlegend=False,
+                 width=500,
+                 height=500
+             )
+
+             st.plotly_chart(fig, use_container_width=True)
+
+             # Calculate the average churn rate (replace with your actual value)
+
+             st.subheader('Customer Churn Probability Comparison')
+
+             average_churn_rate = 19
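+             # NOTE: 19 (%) is a placeholder benchmark (see the comment above); it is not computed from the training data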
+
+             # Convert the overall churn rate to a churn probability
+             main_data_churn_probability = average_churn_rate / 100
+
+             # Retrieve the predicted churn probability for the selected customer
+             predicted_churn_prob = churn_probability[churn_index]
+
+             if churn_labels[churn_index] == "Churn":
+                 churn_prob = churn_probability[churn_index]
+                 # Create a bar chart comparing the churn probability with the average churn rate
+                 labels = ['Churn Probability', 'Average Churn Probability']
+                 values = [predicted_churn_prob, main_data_churn_probability]
+
+                 fig = go.Figure(data=[go.Bar(x=labels, y=values)])
+                 fig.update_layout(
+                     xaxis_title='Churn Probability',
+                     yaxis_title='Probability',
+                     title='Comparison with Average Churn Rate',
+                     yaxis=dict(range=[0, 1])  # Set the y-axis limits between 0 and 1
+                 )
+
+                 # Add explanations
+                 if predicted_churn_prob > main_data_churn_probability:
+                     churn_comparison = "higher than"
+                 elif predicted_churn_prob < main_data_churn_probability:
+                     churn_comparison = "lower than"
+                 else:
+                     churn_comparison = "equal to"
+
+                 explanation = f"This chart compares the churn probability of the selected customer " \
+                               f"with the average churn rate of all customers. It provides insights into how the " \
+                               f"individual customer's churn likelihood ({predicted_churn_prob:.2f}) compares to the " \
+                               f"overall trend. The 'Churn Probability' represents the likelihood of churn " \
+                               f"for the selected customer, while the 'Average Churn Rate' represents the average " \
+                               f"churn rate across all customers ({main_data_churn_probability:.2f}).\n\n" \
+                               f"The customer's churn rate is {churn_comparison} the average churn rate."
+
+                 st.plotly_chart(fig)
+                 st.write(explanation)
+             else:
+                 # Create a bar chart comparing the customer's churn probability with the average churn rate
+                 labels = ['Churn Probability', 'Average Churn Probability']
+                 values = [1 - predicted_churn_prob, main_data_churn_probability]
+
+                 fig = go.Figure(data=[go.Bar(x=labels, y=values)])
+                 fig.update_layout(
+                     xaxis_title='Churn Probability',
+                     yaxis_title='Probability',
+                     title='Comparison with Average Churn Rate',
+                     yaxis=dict(range=[0, 1])  # Set the y-axis limits between 0 and 1
+                 )
+
+                 explanation = f"This bar chart compares the churn probability of the selected customer " \
+                               f"with the average churn rate of all customers. It provides insights into how the " \
+                               f"individual customer's churn likelihood ({1 - predicted_churn_prob:.2f}) compares to the " \
+                               f"overall trend. " \
+                               f"The prediction indicates that the customer is not likely to churn, " \
+                               f"since the churn probability is lower than the no-churn probability."
+
+                 st.plotly_chart(fig)
+                 st.write(explanation)
+     except Exception as e:
+         st.error(f"An error occurred: {str(e)}")
bg.png ADDED
cat_imputer.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16d4765786a38f0c2a305bbe0e981093db2df9d6647e08083f6d108c4279d73b
+ size 1010
encoder.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05664412f8ceae36c79746234dfc86556cfd9ac9d0dce90832b32aa180faed24
+ size 1684
lr_smote_model.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f9deacd488a93280e5004d9bbc57ba1fd2e49f7bf12b095cf9c8bfc5d27a7b5
+ size 2255
numerical_imputer.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c85c1dee62ef35b8270b292ba21551ff6e37f49be186aadbf80c4ad19932132d
+ size 1071
requirements.txt ADDED
@@ -0,0 +1,13 @@
+ joblib==1.2.0
+ numpy==1.22.4
+ pandas==1.5.3
+ shap==0.41.0
+ streamlit==1.22.0
+ scikit-learn==1.2.2
+ matplotlib==3.7.1
+ uvicorn==0.22.0
+ pydantic==1.10.7
+ jinja2==3.0.2
+ python-multipart==0.0.6
+ plotly==5.16.1
scaler.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ff59c6234eaf5c0b182ddd3a80a4435bc6f3528da87d4461c0fce1e90d54df9
+ size 1199
welcome.png ADDED