@@ -7,76 +7,105 @@
 #include "eswitch.h"
 #include "lib/mlx5.h"
 
-void mlx5_mpesw_work(struct work_struct *work)
+static int add_mpesw_rule(struct mlx5_lag *ldev)
 {
-	struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
+	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+	int err;
 
-	mutex_lock(&ldev->lock);
-	mlx5_disable_lag(ldev);
-	mutex_unlock(&ldev->lock);
-}
+	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
+		return 0;
 
-static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
-{
-	struct mlx5_lag *ldev = dev->priv.lag;
+	if (ldev->mode != MLX5_LAG_MODE_NONE) {
+		err = -EINVAL;
+		goto out_err;
+	}
 
-	if (!queue_work(ldev->wq, &ldev->mpesw_work))
-		mlx5_core_warn(dev, "failed to queue work\n");
+	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+	if (err) {
+		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
+		goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
+	return err;
 }
 
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+static void del_mpesw_rule(struct mlx5_lag *ldev)
 {
-	struct mlx5_lag *ldev = dev->priv.lag;
+	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
+	    ldev->mode == MLX5_LAG_MODE_MPESW)
+		mlx5_disable_lag(ldev);
+}
 
-	if (!ldev)
-		return;
+static void mlx5_mpesw_work(struct work_struct *work)
+{
+	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+	struct mlx5_lag *ldev = mpesww->lag;
 
 	mutex_lock(&ldev->lock);
-	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
-	    ldev->mode == MLX5_LAG_MODE_MPESW)
-		mlx5_lag_disable_mpesw(dev);
+	if (mpesww->op == MLX5_MPESW_OP_ENABLE)
+		mpesww->result = add_mpesw_rule(ldev);
+	else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
+		del_mpesw_rule(ldev);
 	mutex_unlock(&ldev->lock);
+
+	complete(&mpesww->comp);
 }
 
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
+				     enum mpesw_op op)
 {
 	struct mlx5_lag *ldev = dev->priv.lag;
+	struct mlx5_mpesw_work_st *work;
 	int err = 0;
 
 	if (!ldev)
 		return 0;
 
-	mutex_lock(&ldev->lock);
-	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
-		goto out;
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
 
-	if (ldev->mode != MLX5_LAG_MODE_NONE) {
+	INIT_WORK(&work->work, mlx5_mpesw_work);
+	init_completion(&work->comp);
+	work->op = op;
+	work->lag = ldev;
+
+	if (!queue_work(ldev->wq, &work->work)) {
+		mlx5_core_warn(dev, "failed to queue mpesw work\n");
 		err = -EINVAL;
 		goto out;
 	}
-
-	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
-	if (err)
-		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
-
+	wait_for_completion(&work->comp);
+	err = work->result;
 out:
-	mutex_unlock(&ldev->lock);
+	kfree(work);
 	return err;
 }
 
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+{
+	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
+}
+
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+{
+	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
+}
+
 int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
 {
 	struct mlx5_lag *ldev = mdev->priv.lag;
 
 	if (!netif_is_bond_master(out_dev) || !ldev)
 		return 0;
 
-	mutex_lock(&ldev->lock);
-	if (ldev->mode == MLX5_LAG_MODE_MPESW) {
-		mutex_unlock(&ldev->lock);
+	if (ldev->mode == MLX5_LAG_MODE_MPESW)
 		return -EOPNOTSUPP;
-	}
-	mutex_unlock(&ldev->lock);
+
 	return 0;
 }
 
@@ -90,11 +119,10 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
 
 void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
 {
-	INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
 	atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
 }
 
 void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
 {
-	cancel_delayed_work_sync(&ldev->bond_work);
+	WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
 }
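
Two idioms in this patch are worth calling out. The first is the synchronous offload: each enable/disable request is wrapped in a heap-allocated work item, queued to the LAG workqueue, and the submitting thread sleeps on a completion until the worker has stored its result, so callers keep blocking semantics while the actual mode change runs in workqueue context under ldev->lock. A minimal standalone sketch of this queue-and-wait pattern follows; the module scaffolding, the demo_* names, and the use of system_wq are illustrative assumptions, not part of the mlx5 code.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* Hypothetical request object, mirroring struct mlx5_mpesw_work_st. */
struct demo_work_st {
	struct work_struct work;
	struct completion comp;
	int result;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_work_st *st = container_of(work, struct demo_work_st, work);

	/* The real operation would run here, serialized by the workqueue. */
	st->result = 0;
	complete(&st->comp);	/* wake the waiting submitter */
}

/* Queue the request, then block until the worker publishes its result. */
static int demo_queue_and_wait(void)
{
	struct demo_work_st *st;
	int err;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	INIT_WORK(&st->work, demo_work_fn);
	init_completion(&st->comp);

	if (!queue_work(system_wq, &st->work)) {
		err = -EINVAL;	/* work was already queued; never ran for us */
		goto out;
	}
	wait_for_completion(&st->comp);
	err = st->result;
out:
	kfree(st);
	return err;
}

static int __init demo_init(void)
{
	return demo_queue_and_wait();
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note how the patch moves all use of ldev->lock into the worker: the callers mlx5_lag_add_mpesw_rule(), mlx5_lag_del_mpesw_rule(), and mlx5_lag_do_mirred() no longer touch the mutex at all.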
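The second idiom is the first/last-reference counting on mpesw_rule_count: atomic_add_return(1, ...) != 1 identifies the first user, which alone performs the setup (mlx5_activate_lag() above, undone on failure via out_err), and !atomic_dec_return(...) identifies the last user, which tears it down (mlx5_disable_lag()). A stripped-down sketch of the same idiom, again with hypothetical demo_* names and meaningful only inside a kernel build:

#include <linux/atomic.h>

static atomic_t demo_rule_count = ATOMIC_INIT(0);

/* 0 -> 1 transition: this caller is first and must do the one-time setup
 * (mlx5_activate_lag() in the patch); later callers only bump the count.
 */
static int demo_ref_get(void)
{
	int err;

	if (atomic_add_return(1, &demo_rule_count) != 1)
		return 0;	/* not the first user: nothing to set up */

	err = 0;		/* one-time setup would run here */
	if (err)
		atomic_dec(&demo_rule_count);	/* undo on failure, as in out_err */
	return err;
}

/* 1 -> 0 transition: this caller is last and must tear the state down
 * (mlx5_disable_lag() in the patch).
 */
static void demo_ref_put(void)
{
	if (!atomic_dec_return(&demo_rule_count)) {
		/* one-time teardown would run here */
	}
}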