import streamlit as st
import time
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import numpy as np
import lightgbm as lgb
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import mean_absolute_error, mean_squared_error
from joblib import dump, load
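# recomienda_tf: basket-based article recommender defined in utils.py
# (assumed to be TF-IDF / cosine-similarity based, consistent with the sklearn imports above)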
from utils import recomienda_tf

# Page configuration
st.set_page_config(page_title="DeepInsightz", page_icon=":bar_chart:", layout="wide")

# Custom CSS for dynamic theme styling
# Streamlit detects light and dark mode automatically via the user's settings in Hugging Face Spaces
if st.get_option("theme.base") == "dark":
    background_color = "#282828"
    text_color = "white"
    metric_box_color = "#4f4f4f"
    sidebar_color = "#282828"
    plot_bgcolor = "rgba(0, 0, 0, 0)"
    primary_color = '#00FF00'  # for positive delta
    negative_color = '#FF0000'  # for negative delta
else:
    background_color = "#f4f4f4"
    text_color = "black"
    metric_box_color = "#dee2e8"
    sidebar_color = "#dee2e8"
    plot_bgcolor = "#f4f4f4"
    primary_color = '#228B22'  # for positive delta in light mode
    negative_color = '#8B0000'  # for negative delta in light mode

st.markdown(f"""
    <style>
    body {{
        background-color: {background_color};
        color: {text_color};
    }}
    [data-testid="stMetric"] {{
        background-color: {metric_box_color};
        border-radius: 10px;
        text-align: center;
        padding: 15px 0;
        margin-bottom: 20px;
    }}
    [data-testid="stMetricLabel"] {{
        display: flex;
        justify-content: center;
        align-items: center;
        color: {text_color};
    }}
    [data-testid="stSidebar"] {{
        background-color: {sidebar_color};
    }}
    </style>
""", unsafe_allow_html=True)

# Load CSV files at the top
df = pd.read_csv("df_clean.csv")
nombres_proveedores = pd.read_csv("nombres_proveedores.csv", sep=';')
euros_proveedor = pd.read_csv("euros_proveedor.csv", sep=',')
ventas_clientes = pd.read_csv("ventas_clientes.csv", sep=',')
customer_clusters = pd.read_csv('predicts/customer_clusters.csv')  # Load the customer clusters here
df_agg_2024 = pd.read_csv('predicts/df_agg_2024.csv')
pca_data_5 = pd.read_csv('pca_data.csv')
historical_data = pd.read_csv('historical_data.csv')

with st.sidebar:
    st.sidebar.title("DeepInsightz")
    page = st.sidebar.selectbox("Selecciona la herramienta que quieres utilizar...", ["📃 Resumen", "🕵️ Análisis de Cliente", "💡 Recomendación de Artículos"])

# Build the total_sales column
ventas_clientes['total_sales'] = ventas_clientes[['VENTA_2021', 'VENTA_2022', 'VENTA_2023']].sum(axis=1)
ventas_clientes_3 = ventas_clientes.copy()  # work on a copy so the original 3-year totals are preserved
ventas_clientes_3['total_sales'] = ventas_clientes_3['total_sales'] / 3
# Sort customers from highest to lowest total sales and keep the top 100 (shown as average annual sales)
ventas_top_100 = ventas_clientes.sort_values(by='total_sales', ascending=False).head(100).copy()
ventas_top_100['total_sales'] = ventas_top_100['total_sales'] / 3


# Ensure customer codes are strings
df['CLIENTE'] = df['CLIENTE'].astype(str)
nombres_proveedores['codigo'] = nombres_proveedores['codigo'].astype(str)
euros_proveedor['CLIENTE'] = euros_proveedor['CLIENTE'].astype(str)
customer_clusters['cliente_id'] = customer_clusters['cliente_id'].astype(str)  # Ensure customer IDs are strings
fieles_df = pd.read_csv("clientes_relevantes.csv")
cestas = pd.read_csv("cestas.csv")
productos = pd.read_csv("productos.csv")
df_agg_2024['cliente_id'] = df_agg_2024['cliente_id'].astype(str)
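# Mapping used to translate encoded manufacturer ids (marca_id_encoded) back to the original
# supplier codes (assumed to be a fitted scikit-learn LabelEncoder persisted with joblib)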
marca_id_mapping = load('marca_id_mapping.joblib')

# Convert all columns except 'CLIENTE' to float in euros_proveedor
for col in euros_proveedor.columns:
    if col != 'CLIENTE':
        euros_proveedor[col] = pd.to_numeric(euros_proveedor[col], errors='coerce')

# Check for NaN values after conversion
if euros_proveedor.isna().any().any():
    st.warning("Some values in euros_proveedor couldn't be converted to numbers. Please review the input data.")

# Ignore the last two columns of df
df = df.iloc[:, :-2]

# Function to get supplier name
def get_supplier_name(code):
    code = str(code)  # Ensure code is a string
    name = nombres_proveedores[nombres_proveedores['codigo'] == code]['nombre'].values
    return name[0] if len(name) > 0 else code

def get_supplier_name_encoded(encoded_code):
    try:
        # Ensure the encoded code is an integer
        encoded_code = int(encoded_code)
        print(f"Encoded Code: {encoded_code}")

        # Use the label encoder to map the encoded code back to the original manufacturer code
        if encoded_code < len(marca_id_mapping.classes_):
            real_code = marca_id_mapping.inverse_transform([encoded_code])[0]
            print(f"Real Manufacturer Code: {real_code}")
        else:
            print(f"Encoded code not found in the label encoder: {encoded_code}")
            return f"Unknown code: {encoded_code}"  # Handle case where encoded code is not found

        # Now, use the real_code to find the manufacturer name in nombres_proveedores
        name = nombres_proveedores[nombres_proveedores['codigo'] == str(real_code)]['nombre'].values
        print(f"Manufacturer Name Found: {name}")  # Check what name is returned

        # Return the manufacturer name if found, otherwise return the real_code
        return name[0] if len(name) > 0 else real_code

    except Exception as e:
        print(f"Error encountered: {e}")
        return f"Error for code: {encoded_code}"

# Custom Donut Chart with Plotly for Inbound/Outbound Percentage
def create_donut_chart(values, labels, color_scheme, title):
    fig = px.pie(
        values=values, 
        names=labels, 
        hole=0.7,
        color_discrete_sequence=color_scheme
    )
    fig.update_traces(textinfo='percent+label', hoverinfo='label+percent', textposition='inside', showlegend=False)
    fig.update_layout(
        annotations=[dict(text=f"{int(values[1])}%", x=0.5, y=0.5, font_size=40, showarrow=False)],
        title=title,
        height=300,
        margin=dict(t=30, b=10, l=10, r=10),
        paper_bgcolor=plot_bgcolor,  # Use theme-dependent background color
        plot_bgcolor=plot_bgcolor
    )
    return fig

# Donut chart with color scheme based on theme
if st.get_option("theme.base") == "dark":
    donut_color_scheme = ['#155F7A', '#29b5e8']  # Dark mode colors
else:
    donut_color_scheme = ['#007BFF', '#66b5ff']  # Light mode colors

# Function to create radar chart with square root transformation
def radar_chart(categories, values, amounts, title):
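    # categories: manufacturer names; values: units purchased per manufacturer;
    # amounts: spend in euros per manufacturer; title: chart title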
    N = len(categories)
    angles = [n / float(N) * 2 * np.pi for n in range(N)]
    angles += angles[:1]
    
    fig, ax = plt.subplots(figsize=(12, 12), subplot_kw=dict(projection='polar'))
    
    # Apply square root transformation
    sqrt_values = np.sqrt(values)
    sqrt_amounts = np.sqrt(amounts)
    
    max_sqrt_value = max(sqrt_values)
    normalized_values = [v / max_sqrt_value for v in sqrt_values]
    
    # Adjust scaling for spend values
    max_sqrt_amount = max(sqrt_amounts)
    scaling_factor = 0.7  # Adjust this value to control how much the spend values are scaled up
    normalized_amounts = [min((a / max_sqrt_amount) * scaling_factor, 1.0) for a in sqrt_amounts]
    
    normalized_values += normalized_values[:1]
    ax.plot(angles, normalized_values, 'o-', linewidth=2, color='#FF69B4', label='% Units (sqrt)')
    ax.fill(angles, normalized_values, alpha=0.25, color='#FF69B4')
    
    normalized_amounts += normalized_amounts[:1]
    ax.plot(angles, normalized_amounts, 'o-', linewidth=2, color='#4B0082', label='% Spend (sqrt)')
    ax.fill(angles, normalized_amounts, alpha=0.25, color='#4B0082')
    
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(categories, size=8, wrap=True)
    ax.set_ylim(0, 1)
    
    circles = np.linspace(0, 1, 5)
    for circle in circles:
        ax.plot(angles, [circle]*len(angles), '--', color='gray', alpha=0.3, linewidth=0.5)
    
    ax.set_yticklabels([])
    ax.spines['polar'].set_visible(False)
    
    plt.title(title, size=16, y=1.1)
    plt.legend(loc='upper right', bbox_to_anchor=(1.3, 1.1))
    
    return fig



if page == "📃 Resumen":
    # st.title("Welcome to DeepInsightz")
    # st.markdown("""
    #     ### Data-driven Customer Clustering
    #     We analyzed thousands of customers and suppliers to help businesses make smarter sales decisions.
    # """)

    # Create layout with three columns
    col1, col2, col3 = st.columns((1.5, 4, 2.5), gap='medium')

    # Left Column (Red): Metrics and Donut Charts
    with col1:
        st.markdown('#### Información General')
        st.metric(label="Rango de fechas", value="2021-2023")
        st.metric(label="Clientes analizados", value="3.000")
        st.metric(label="Productos únicos vendidos", value="10.702")
        st.metric(label="Líneas de venta totales", value="764.396")
        


    # Middle Column (White): 3D Cluster Model and Bar Chart
    with col2:
        st.markdown('#### Cluster de Clientes 3D')

        # Create 3D PCA plot using actual data from pca_data_5
        fig_cluster = px.scatter_3d(
            pca_data_5, 
            x='PC1', 
            y='PC2', 
            z='PC3', 
            color='cluster_id', 
            hover_name='CustomerID',
            color_continuous_scale='Turbo'
        )
        fig_cluster.update_layout(
            scene=dict(aspectratio=dict(x=1, y=1, z=0.8)),  # Adjusted aspect ratio for better balance
            margin=dict(t=10, b=10, l=10, r=10),  # Tighten margins further
            height=600,  # Slightly increased height for better visibility
        )
        st.plotly_chart(fig_cluster, use_container_width=True)
    
    # Right Column (Blue): Key Metrics Overview and Data Preparation Summary
    with col3:
        # Show the table with the top 100 customers
        st.markdown('#### Top 100 Clientes')

        # Configure columns to show customer codes and total sales
        st.dataframe(ventas_top_100[['codigo_cliente', 'total_sales']],
                    column_order=("codigo_cliente", "total_sales"),
                    hide_index=True,
                    width=350,  # Set the table width
                    height=400,  # Set the table height
                    column_config={
                        "codigo_cliente": st.column_config.TextColumn(
                            "Código de Cliente",
                        ),
                        "total_sales": st.column_config.ProgressColumn(
                            "Venta Total (€)",
                            format="%d",
                            min_value=0,
                            max_value=ventas_top_100['total_sales'].max()
                        )}
                    )
        # Calculate sales insights
        sales_min = ventas_clientes[ventas_clientes['total_sales'] > 0]['total_sales'].min()
        sales_max = ventas_clientes['total_sales'].max()
        sales_median = ventas_clientes['total_sales'].median()
        sales_90th = ventas_clientes['total_sales'].quantile(0.9)
        sales_10th = ventas_clientes['total_sales'].quantile(0.1)

        # About Section with relevant data insights
        with st.expander('Los clientes al detalle', expanded=True):
            st.write(f'''
                - **Rango de ventas**: €{sales_min:,.0f} - €{sales_max:,.0f}.
                - **Ventas Medianas**: €{sales_median:,.0f} .
                - **Percentil 90**: €{sales_90th:,.0f}.
                - **Percentil 10**: €{sales_10th:,.0f}.
            ''')
# Customer Analysis Page
elif page == "🕵️ Análisis de Cliente":
    st.markdown("""
    <h2 style='text-align: center; font-size: 2.5rem;'>Análisis de Cliente</h2>
    <p style='text-align: center; font-size: 1.2rem; color: gray;'> 
    Introduce el código del cliente para explorar información detallada del mismo, incluyendo ventas anteriores, predicciones para el año actual e información específica por fabricante.
    </p>
    """, unsafe_allow_html=True)

    # Combine text input and dropdown into a single searchable selectbox
    customer_code = st.selectbox(
        "Escribe o selecciona el código de tu cliente",
        df['CLIENTE'].unique(),  # All customer codes
        format_func=lambda x: str(x),  # Ensures the values are displayed as strings
        help="Start typing to search for a specific customer code"
    )

    if st.button("Calcular"):
        if customer_code:
            with st.spinner("Estamos identificando el grupo del cliente..."):
                # Find Customer's Cluster
                customer_match = customer_clusters[customer_clusters['cliente_id'] == customer_code]
                time.sleep(1)
                
                if not customer_match.empty:
                    cluster = customer_match['cluster_id'].values[0]
                else:
                    st.error(f"No se encontró el cliente {customer_code} en los clusters.")
                    st.stop()  # stop here: 'cluster' would otherwise be undefined below

            with st.spinner(f"Seleccionando el modelo predictivo..."):
                # Load the Corresponding Model
                model_path = f'models/modelo_cluster_{cluster}.txt'
                gbm = lgb.Booster(model_file=model_path)

            with st.spinner("Preparando los datos..."):
                # Load predict data for that cluster
                predict_data = pd.read_csv(f'predicts/predict_cluster_{cluster}.csv')
                
                # Convert cliente_id to string
                predict_data['cliente_id'] = predict_data['cliente_id'].astype(str)

            with st.spinner("Filtrando data..."):
                # Filter for the specific customer
                customer_code_str = str(customer_code)
                customer_data = predict_data[predict_data['cliente_id'] == customer_code_str]

            with st.spinner("Generando predicciones de venta..."):
                if not customer_data.empty:
                    # Define features consistently with the training process
                    lag_features = [f'precio_total_lag_{lag}' for lag in range(1, 25)]
                    features = lag_features + ['mes', 'marca_id_encoded', 'año', 'cluster_id']
    
                    # Prepare data for prediction
                    X_predict = customer_data[features].copy()  # copy to avoid SettingWithCopyWarning when casting dtypes below

                    # Convert categorical features to 'category' dtype
                    categorical_features = ['mes', 'marca_id_encoded', 'cluster_id']
                    for feature in categorical_features:
                        X_predict[feature] = X_predict[feature].astype('category')
                    
                    # Make Prediction for the selected customer
                    y_pred = gbm.predict(X_predict, num_iteration=gbm.best_iteration)

                    # Reassemble the results
                    results = customer_data[['cliente_id', 'marca_id_encoded', 'fecha_mes']].copy()
                    results['ventas_predichas'] = y_pred

                    # Load actual data from df_agg_2024
                    actual_sales = df_agg_2024[df_agg_2024['cliente_id'] == customer_code_str]
                    
                    if not actual_sales.empty:
                        # Merge predictions with actual sales
                        results = results.merge(actual_sales[['cliente_id', 'marca_id_encoded', 'fecha_mes', 'precio_total']], 
                                                on=['cliente_id', 'marca_id_encoded', 'fecha_mes'], 
                                                how='left')
                        results.rename(columns={'precio_total': 'ventas_reales'}, inplace=True)
                    else:
                        # If no actual sales data for 2024, fill 'ventas_reales' with 0
                        results['ventas_reales'] = 0

                    # Ensure any missing sales data is filled with 0
                    results['ventas_reales'] = results['ventas_reales'].fillna(0)

                    # Define the cutoff date for the last 12 months
                    fecha_inicio = pd.to_datetime("2023-01-01")
                    fecha_corte = pd.to_datetime("2024-09-01")

                    # Convert fecha_mes to datetime in the historical_data DataFrame
                    historical_data['fecha_mes'] = pd.to_datetime(historical_data['fecha_mes'], errors='coerce')

                    # Ensure cliente_id is of type string and strip any leading/trailing whitespace
                    historical_data['cliente_id'] = historical_data['cliente_id'].astype(str).str.strip()
                    customer_code_str = str(customer_code).strip()  # Ensure the customer code is also properly formatted

                    filtered_historical_data = historical_data[historical_data['cliente_id'] == customer_code_str]


                    # Filter the historical data by customer and by date range (2023)
                    fecha_inicio_2023 = pd.to_datetime("2023-01-01")
                    fecha_fin_2023 = pd.to_datetime("2023-12-31")

                    datos_historicos = historical_data[
                        (historical_data['cliente_id'] == customer_code_str) &
                        (historical_data['fecha_mes'] >= fecha_inicio_2023) &
                        (historical_data['fecha_mes'] <= fecha_fin_2023)
                    ].groupby('fecha_mes')['precio_total'].sum().reset_index()

                    # Rename the 'precio_total' column to 'ventas_historicas' if the result is not empty
                    if not datos_historicos.empty:
                        datos_historicos.rename(columns={'precio_total': 'ventas_historicas'}, inplace=True)
                    else:
                        # If there is no historical data, generate the 2023 dates with ventas_historicas = 0
                        fechas_2023 = pd.date_range(start='2023-01-01', end='2023-12-31', freq='M')
                        datos_historicos = pd.DataFrame({'fecha_mes': fechas_2023, 'ventas_historicas': [0] * len(fechas_2023)})

                    # Aggregate the prediction and actual sales data for 2024
                    datos_cliente_total = results.groupby('fecha_mes').agg({
                        'ventas_reales': 'sum',
                        'ventas_predichas': 'sum'
                    }).reset_index()

                    # Make sure fecha_mes in datos_cliente_total is datetime
                    datos_cliente_total['fecha_mes'] = pd.to_datetime(datos_cliente_total['fecha_mes'], errors='coerce')

                    # Generate a 2024 date range in case there are no predictions
                    fechas_2024 = pd.date_range(start='2024-01-01', end='2024-12-31', freq='M')
                    fechas_df_2024 = pd.DataFrame({'fecha_mes': fechas_2024})

                    # Make sure fecha_mes in fechas_df_2024 is datetime
                    fechas_df_2024['fecha_mes'] = pd.to_datetime(fechas_df_2024['fecha_mes'], errors='coerce')

                    # Combine historical data with predictions and actual sales using a merge
                    # how='outer' ensures that all dates from both 2023 and 2024 are included
                    datos_combinados = pd.merge(datos_historicos, datos_cliente_total, on='fecha_mes', how='outer').sort_values('fecha_mes')

                    # Fill NaNs: 0 in ventas_historicas where predictions are missing, and vice versa
                    datos_combinados['ventas_historicas'] = datos_combinados['ventas_historicas'].fillna(0)
                    datos_combinados['ventas_predichas'] = datos_combinados['ventas_predichas'].fillna(0)
                    datos_combinados['ventas_reales'] = datos_combinados['ventas_reales'].fillna(0)

                    # Create the chart with Plotly
                    fig = go.Figure()

                    # Plot historical sales
                    fig.add_trace(go.Scatter(
                        x=datos_combinados['fecha_mes'], 
                        y=datos_combinados['ventas_historicas'], 
                        mode='lines+markers', 
                        name='Ventas Históricas', 
                        line=dict(color='blue')
                    ))

                    # Plot predicted sales
                    fig.add_trace(go.Scatter(
                        x=datos_combinados['fecha_mes'], 
                        y=datos_combinados['ventas_predichas'], 
                        mode='lines+markers', 
                        name='Ventas Predichas', 
                        line=dict(color='orange')
                    ))

                    # Plot actual sales
                    fig.add_trace(go.Scatter(
                        x=datos_combinados['fecha_mes'], 
                        y=datos_combinados['ventas_reales'], 
                        mode='lines+markers', 
                        name='Ventas Reales', 
                        line=dict(color='green')
                    ))

                    # Customize the layout to focus on 2023 and 2024
                    fig.update_layout(
                        title=f"Ventas Históricas, Predichas y Reales para Cliente {customer_code}",
                        xaxis_title="Fecha",
                        yaxis_title="Ventas (€)",
                        height=600,
                        xaxis_range=[fecha_inicio_2023, pd.to_datetime("2024-09-30")],  # Restrict the x-axis range to 2023-2024
                        legend_title="Tipo de Ventas",
                        hovermode="x unified"
                    )

                    # Display the chart in Streamlit
                    st.plotly_chart(fig)

                    # Calculate metrics for 2024 data
                    datos_2024 = datos_combinados[datos_combinados['fecha_mes'].dt.year == 2024]
                    actual = datos_2024['ventas_reales']
                    predicted = datos_2024['ventas_predichas']

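                    # MAPE excludes months where actual sales are 0 to avoid division by zero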
                    def calculate_mape(y_true, y_pred):
                        mask = y_true != 0
                        return np.mean(np.abs((y_true[mask] - y_pred[mask]) / y_true[mask])) * 100

                    mae = mean_absolute_error(actual, predicted)
                    mse = mean_squared_error(actual, predicted)
                    rmse = np.sqrt(mse)
                    mape = calculate_mape(actual, predicted)
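                    # SMAPE: symmetric mean absolute percentage error (bounded between 0% and 200%)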
                    smape = np.mean(2 * np.abs(actual - predicted) / (np.abs(actual) + np.abs(predicted))) * 100

                    # Display metrics
                    st.subheader("Métricas de Predicción (2024)")
                    col1, col2, col3, col4 = st.columns(4)
                    col1.metric("MAE", f"{mae:.2f} €",help="Promedio de la diferencia absoluta entre las predicciones y los valores reales.")
                    col2.metric("MAPE", f"{mape:.2f}%",help="Porcentaje promedio de error en las predicciones.")
                    col3.metric("RMSE", f"{rmse:.2f} €",help="Medida de la desviación estándar de los residuos de predicción.")
                    col4.metric("SMAPE", f"{smape:.2f}%",help="Alternativa al MAPE que maneja mejor los valores cercanos a cero.")


                    # Split space into two columns
                    col1, col2 = st.columns(2)

                    # Column 1: Radar chart for top manufacturers
                    with col1:
                        st.subheader("¡Esto tiene buena pinta!")
                        st.info("Su cliente ha superado las ventas predichas de las siguientes marcas:")

                        # Group results by manufacturer to calculate the total predicted and actual sales
                        grouped_results = results.groupby('marca_id_encoded').agg({
                            'ventas_reales': 'sum',
                            'ventas_predichas': 'sum'
                        }).reset_index()

                        # Identify manufacturers that exceeded predicted sales
                        overperforming_manufacturers = grouped_results[grouped_results['ventas_reales'] > grouped_results['ventas_predichas']].copy()

                        if not overperforming_manufacturers.empty:
                            # Calculate the extra amount (difference between actual and predicted sales)
                            overperforming_manufacturers['extra_amount'] = overperforming_manufacturers['ventas_reales'] - overperforming_manufacturers['ventas_predichas']

                            # Sort by the highest extra amount
                            overperforming_manufacturers = overperforming_manufacturers.sort_values(by='extra_amount', ascending=False)

                            # Limit to top 10 overperforming manufacturers
                            top_overperformers = overperforming_manufacturers.head(10)

                            # Display two cards per row
                            for i in range(0, len(top_overperformers), 2):
                                cols = st.columns(2)  # Create two columns for two cards in a row

                                for j, col in enumerate(cols):
                                    if i + j < len(top_overperformers):
                                        row = top_overperformers.iloc[i + j]
                                        manufacturer_name = get_supplier_name_encoded(row['marca_id_encoded'])
                                        predicted = row['ventas_predichas']
                                        actual = row['ventas_reales']
                                        extra = row['extra_amount']

                                        # Use st.metric for compact display in each column
                                        with col:
                                            st.metric(
                                                label=f"{manufacturer_name}",
                                                value=f"{actual:.2f}€",
                                                delta=f"Exceeded by {extra:.2f}€",
                                                delta_color="normal"
                                            )


                        # Radar chart logic remains the same
                        customer_df = df[df["CLIENTE"] == str(customer_code)]
                        all_manufacturers = customer_df.iloc[:, 1:].T
                        all_manufacturers.index = all_manufacturers.index.astype(str)

                        customer_euros = euros_proveedor[euros_proveedor["CLIENTE"] == str(customer_code)]
                        sales_data = customer_euros.iloc[:, 1:].T
                        sales_data.index = sales_data.index.astype(str)

                        sales_data_filtered = sales_data.drop(index='CLIENTE', errors='ignore')
                        sales_data_filtered = sales_data_filtered.apply(pd.to_numeric, errors='coerce')
                        all_manufacturers = all_manufacturers.apply(pd.to_numeric, errors='coerce')

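                        # Take the top 10 manufacturers by units and by spend, combine them (at most 20),
                        # and keep only those present in both the units and the spend tables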
                        top_units = all_manufacturers.sort_values(by=all_manufacturers.columns[0], ascending=False).head(10)
                        top_sales = sales_data_filtered.sort_values(by=sales_data_filtered.columns[0], ascending=False).head(10)
                        combined_top = pd.concat([top_units, top_sales]).index.unique()[:20]

                        combined_top = [m for m in combined_top if m in all_manufacturers.index and m in sales_data_filtered.index]

                        if combined_top:
                            combined_data = pd.DataFrame({
                                'units': all_manufacturers.loc[combined_top, all_manufacturers.columns[0]],
                                'sales': sales_data_filtered.loc[combined_top, sales_data_filtered.columns[0]]
                            }).fillna(0)

                            combined_data_sorted = combined_data.sort_values(by=['units', 'sales'], ascending=False)
                            non_zero_manufacturers = combined_data_sorted[combined_data_sorted['units'] > 0]
                            
                            if len(non_zero_manufacturers) < 3:
                                zero_manufacturers = combined_data_sorted[combined_data_sorted['units'] == 0].head(3 - len(non_zero_manufacturers))
                                manufacturers_to_show = pd.concat([non_zero_manufacturers, zero_manufacturers])
                            else:
                                manufacturers_to_show = non_zero_manufacturers

                            values = manufacturers_to_show['units'].tolist()
                            amounts = manufacturers_to_show['sales'].tolist()
                            manufacturers = [get_supplier_name(m) for m in manufacturers_to_show.index]

                            if manufacturers:
                                fig = radar_chart(manufacturers, values, amounts, f'Gráfico de radar para los {len(manufacturers)} principales fabricantes del cliente {customer_code}')
                                st.pyplot(fig)

                    # Column 2: Alerts and additional analysis
                    with col2:
                        st.subheader("¡Puede que tengas que revisar esto!")
                        st.warning("Se esperaba que tu cliente comprara más productos de las siguientes marcas:")

                        # Group results by manufacturer to calculate the total predicted and actual sales
                        grouped_results = results.groupby('marca_id_encoded').agg({
                            'ventas_reales': 'sum',
                            'ventas_predichas': 'sum'
                        }).reset_index()

                        # Identify manufacturers that didn't meet predicted sales
                        underperforming_manufacturers = grouped_results[grouped_results['ventas_reales'] < grouped_results['ventas_predichas']].copy()

                        if not underperforming_manufacturers.empty:
                            # Calculate the missed amount
                            underperforming_manufacturers['missed_amount'] = underperforming_manufacturers['ventas_predichas'] - underperforming_manufacturers['ventas_reales']

                            # Sort by the highest missed amount
                            underperforming_manufacturers = underperforming_manufacturers.sort_values(by='missed_amount', ascending=False)

                            # Limit to top 10 missed amounts
                            top_misses = underperforming_manufacturers.head(10)

                            # Display two cards per row
                            for i in range(0, len(top_misses), 2):
                                cols = st.columns(2)  # Create two columns for two cards in a row

                                for j, col in enumerate(cols):
                                    if i + j < len(top_misses):
                                        row = top_misses.iloc[i + j]
                                        manufacturer_name = get_supplier_name_encoded(row['marca_id_encoded'])
                                        predicted = row['ventas_predichas']
                                        actual = row['ventas_reales']
                                        missed = row['missed_amount']

                                        # Use st.metric for compact display in each column
                                        with col:
                                            st.metric(
                                                label=f"{manufacturer_name}",
                                                value=f"{actual:.2f}€",
                                                delta=f"Missed by {missed:.2f}€",
                                                delta_color="inverse"
                                            )
                        else:
                            st.success("All manufacturers have met or exceeded predicted sales.")

                        # # Additional chart: compare predicted vs actual sales for the top manufacturers
                        # st.markdown("### Predicted vs Actual Sales for Top Manufacturers")
                        # top_manufacturers = results.groupby('marca_id_encoded').agg({'ventas_reales': 'sum', 'ventas_predichas': 'sum'}).sort_values(by='ventas_reales', ascending=False).head(10)

                        # fig_comparison = go.Figure()
                        # fig_comparison.add_trace(go.Bar(x=top_manufacturers.index, y=top_manufacturers['ventas_reales'], name="Actual Sales", marker_color='blue'))
                        # fig_comparison.add_trace(go.Bar(x=top_manufacturers.index, y=top_manufacturers['ventas_predichas'], name="Predicted Sales", marker_color='orange'))

                        # fig_comparison.update_layout(
                        #     title="Actual vs Predicted Sales by Top Manufacturers",
                        #     xaxis_title="Manufacturer",
                        #     yaxis_title="Sales (€)",
                        #     barmode='group',
                        #     height=400,
                        #     hovermode="x unified"
                        # )

                        # st.plotly_chart(fig_comparison, use_container_width=True)

                        # Annual sales chart
                        ventas_clientes['codigo_cliente'] = ventas_clientes['codigo_cliente'].astype(str).str.strip()

                        sales_columns = ['VENTA_2021', 'VENTA_2022', 'VENTA_2023']
                        if all(col in ventas_clientes.columns for col in sales_columns):
                            customer_sales_data = ventas_clientes[ventas_clientes['codigo_cliente'] == customer_code]

                            if not customer_sales_data.empty:
                                customer_sales = customer_sales_data[sales_columns].values[0]
                                years = ['2021', '2022', '2023']

                                # Convert 'fecha_mes' to datetime format if it's not already
                                if not pd.api.types.is_datetime64_any_dtype(results['fecha_mes']):
                                    results['fecha_mes'] = pd.to_datetime(results['fecha_mes'], errors='coerce')

                                # Add the 2024 actual and predicted data
                                if 'ventas_predichas' in results.columns and 'ventas_reales' in results.columns:
                                    actual_sales_2024 = results[results['fecha_mes'].dt.year == 2024]['ventas_reales'].sum()
                                    predicted_sales_2024 = results[results['fecha_mes'].dt.year == 2024]['ventas_predichas'].sum()

                                    # Assuming only 9 months of actual data are available, annualize the sales
                                    months_available = 9
                                    actual_sales_2024_annual = (actual_sales_2024 / months_available) * 12

                                    # Prepare data for the bar chart
                                    sales_values = list(customer_sales) + [actual_sales_2024_annual]
                                    predicted_values = list(customer_sales) + [predicted_sales_2024]

                                    years.append('2024')

                                    # Create the bar chart for historical and 2024 data
                                    fig_sales_bar = go.Figure()
                                    fig_sales_bar.add_trace(go.Bar(
                                        x=years[:3],  
                                        y=sales_values[:3],
                                        name="Historical Sales",
                                        marker_color='blue'
                                    ))

                                    fig_sales_bar.add_trace(go.Bar(
                                        x=[years[3]],  
                                        y=[sales_values[3]],
                                        name="2024 Actual Sales (Annualized)",
                                        marker_color='green'
                                    ))

                                    fig_sales_bar.add_trace(go.Bar(
                                        x=[years[3]],  
                                        y=[predicted_values[3]],
                                        name="2024 Predicted Sales",
                                        marker_color='orange'
                                    ))

                                    # Customize layout
                                    fig_sales_bar.update_layout(
                                        title=f"Ventas anuales de tu cliente",
                                        xaxis_title="Year",
                                        yaxis_title="Sales (€)",
                                        barmode='group',
                                        height=600,
                                        legend_title_text="Sales Type",
                                        hovermode="x unified"
                                    )

                                    # Display the chart
                                    st.plotly_chart(fig_sales_bar, use_container_width=True)

                                else:
                                    st.warning(f"No predicted or actual data found for customer {customer_code} for 2024.")

# Customer Recommendations Page
elif page == "💡 Recomendación de Artículos":
    # Load the CSVs needed for recommendations: cestas and productos
    cestas = pd.read_csv('cestas.csv') 
    productos = pd.read_csv('productos.csv')
    # Main style for the page
    st.markdown(
        "<h1 style='text-align: center;'>Recomendación de Artículos</h1>", 
        unsafe_allow_html=True
    )
    st.markdown("""
        <p style='text-align: center; color: #5D6D7E;'>Obtén recomendaciones personalizadas para tus clientes basadas en su cesta de compra.</p>
    """, unsafe_allow_html=True)
    st.write("### Selecciona los artículos y asigna las cantidades para la cesta:")
    # Add a divider to improve the visual segmentation
    st.divider()
    # Show the list of available articles (the code associated with each description is used)
    available_articles = productos[['ARTICULO', 'DESCRIPCION']].drop_duplicates()
    
    # Build a dictionary mapping each description to its article code
    article_dict = dict(zip(available_articles['DESCRIPCION'], available_articles['ARTICULO']))
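    # Note: if two article codes share the same description, the last one wins in this mapping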
    # Let the user pick descriptions, but work internally with the article codes
    selected_descriptions = st.multiselect("Select Articles", available_articles['DESCRIPCION'].unique())
    quantities = {}
    
    if selected_descriptions:
        st.write("### Selecciona los artículos y las unidades:")
        for description in selected_descriptions:
            code = article_dict[description]  # Use the article code
            col1, col2 = st.columns([1, 3])  # Adjust proportions so the quantities sit on the left
            with col1:
                # Number input for the quantity, keyed by the article code
                quantities[code] = st.number_input(f"Quantity {code}", min_value=0, step=1, key=code)
            with col2:
                # Show the article description
                st.write(description)
    
    # Styled button with an icon to trigger the recommendations
    if st.button("🛒 Obtener Recomendaciones"):
        # Build a list of articles from the selected codes and quantities
        new_basket = []
        for code in quantities:
            quantity = quantities[code]
            if quantity > 0:
                new_basket.extend([code] * quantity)  # Repeat the code 'quantity' times
        if new_basket:
            # Process the basket to generate recommendations
            recommendations_df = recomienda_tf(new_basket, cestas, productos)
            
            if not recommendations_df.empty:
                st.success("### Según tu cesta, te recomendamos que consideres añadir uno de estos artículos:")
                st.dataframe(recommendations_df, height=300, width=800)  # Set the size of the displayed DataFrame
            else:
                st.warning("⚠️ No recommendations found for the provided basket.")
        else:
            st.warning("⚠️ Please select at least one article and set its quantity.")
# # Customer Recommendations Page
# elif page == "Articles Recommendations":
#     st.title("Articles Recommendations")
#     st.markdown("""
#         Get tailored recommendations for your customers based on their basket.
#     """)
#     st.write("Select items and assign quantities for the basket:")
#     # Show the list of available articles
#     available_articles = productos['ARTICULO'].unique()
#     selected_articles = st.multiselect("Select Articles", available_articles)
#     # Create inputs to enter the quantity of each selected article
#     quantities = {}
#     for article in selected_articles:
#         quantities[article] = st.number_input(f"Quantity for {article}", min_value=0, step=1)
#     if st.button("Calcular"):  # Add the "Calcular" button
#         # Build a list of articles from the selected codes and quantities
#         new_basket = []
#         for article in selected_articles:
#             quantity = quantities[article]
#             if quantity > 0:
#                 new_basket.extend([article] * quantity)  # Repeat the code 'article' 'quantity' times
#         if new_basket:
#             # Process the basket to generate recommendations
#             recommendations_df = recomienda_tfid(new_basket)
#             if not recommendations_df.empty:
#                 st.write("### Recommendations based on the current basket:")
#                 st.dataframe(recommendations_df)
#             else:
#                 st.warning("No recommendations found for the provided basket.")
#         else:
#             st.warning("Please select at least one article and set its quantity.")