rbmatlab 1.16.09 — API documentation
File: stokes_volume_matrix.m
function res = stokes_volume_matrix(x, model, df_info, i, j)
%function res = stokes_volume_matrix(x, model, df_info, i, j)
%
% Integrand for the (i,j) entry of the element-local volume matrix of a
% Stokes-type saddle-point problem, evaluated at the reference quadrature
% point 'x'.
%
% The return value 'res' is a vector with one entry per grid element, or a
% cell array of such vectors when the coefficient functions
% (diffusivity_tensor / divergence_tensor / reaction) return cell arrays,
% i.e. are given in affinely decomposed form.
%
% Local dof convention (derived from the index tests below): dofs
% 1..vel_ndofs_per_element belong to the first velocity component, dofs
% vel_ndofs_per_element+1..2*vel_ndofs_per_element to the second, and the
% remaining dofs to the pressure.
%
% If 'model.uh' exists, a nonlinear convection-type term linearized around
% the current iterate 'uh' is assembled for the velocity-velocity entries
% instead of the diffusion/reaction terms.
%
% IM 12.04.2013

% number of scalar dofs per element of ONE velocity component
vel_ndofs_per_element = df_info.values{1}.values{1}.ndofs_per_element;

% detect velocity indices: both i and j are velocity dofs AND belong to the
% same velocity component (sum(...) ~= 1 excludes the mixed-component case)
if all([i j] <= vel_ndofs_per_element*2) && sum([i j] <= vel_ndofs_per_element) ~= 1

  if ~isfield(model, 'uh')
    % linear (Stokes) case: diffusion contribution
    %   grad(phi_j)^T * (J^{-1} A^T J^{-T})^T-style product, assembled
    %   entrywise below via the intermediate ATJ and JTATJ products

    gradient_hat_phi_i = evaluate_basis_function_derivative(df_info, x, i);
    gradient_hat_phi_j = evaluate_basis_function_derivative(df_info, x, j);

    % diffusivity tensor on all elements at point x; cache_function avoids
    % re-evaluation for repeated identical (x, model) arguments
    d = cache_function(@(xarg, xmodel)xmodel.diffusivity_tensor(...
        df_info.grid, 1:df_info.grid.nelements, xarg, xmodel), ...
                       x, model);
    % JIT: transposed inverse Jacobians of the element transformations
    JIT = df_info.grid.JIT;

    if ~iscell(d)
      % single (non-decomposed) coefficient; columns d(:,1:4) hold the 2x2
      % tensor entries per element -- assumes row-wise layout
      % [A11 A12 A21 A22]; TODO confirm against diffusivity_tensor

      % ATJ = A^T * JIT (per element, entrywise)
      ATJ_11 = d(:, 1) .* JIT(:, 1, 1) + d(:, 2) .* JIT(:, 2, 1);
      ATJ_21 = d(:, 3) .* JIT(:, 1, 1) + d(:, 4) .* JIT(:, 2, 1);
      ATJ_12 = d(:, 1) .* JIT(:, 1, 2) + d(:, 2) .* JIT(:, 2, 2);
      ATJ_22 = d(:, 3) .* JIT(:, 1, 2) + d(:, 4) .* JIT(:, 2, 2);

      % JTATJ = JIT^T * ATJ (per element, entrywise)
      JTATJ_11 = JIT(:, 1, 1) .* ATJ_11 + JIT(:, 2, 1) .* ATJ_21;
      JTATJ_12 = JIT(:, 1, 1) .* ATJ_12 + JIT(:, 2, 1) .* ATJ_22;
      JTATJ_21 = JIT(:, 1, 2) .* ATJ_11 + JIT(:, 2, 2) .* ATJ_21;
      JTATJ_22 = JIT(:, 1, 2) .* ATJ_12 + JIT(:, 2, 2) .* ATJ_22;

      % contract reference-element gradients with the transformed tensor;
      % each product is scalar * per-element vector
      res = ...
          gradient_hat_phi_j(:, 1)' * gradient_hat_phi_i(:, 1) * JTATJ_11 + ...
          gradient_hat_phi_j(:, 1)' * gradient_hat_phi_i(:, 2) * JTATJ_12 + ...
          gradient_hat_phi_j(:, 2)' * gradient_hat_phi_i(:, 1) * JTATJ_21 + ...
          gradient_hat_phi_j(:, 2)' * gradient_hat_phi_i(:, 2) * JTATJ_22;
    else
      % affinely decomposed coefficient: one result component per tensor
      % component, same computation as above applied to each d{q}

      res = cell(1,length(d));
      for q = 1:length(d)

        ATJ_11 = d{q}(:, 1) .* JIT(:, 1, 1) + d{q}(:, 2) .* JIT(:, 2, 1);
        ATJ_21 = d{q}(:, 3) .* JIT(:, 1, 1) + d{q}(:, 4) .* JIT(:, 2, 1);
        ATJ_12 = d{q}(:, 1) .* JIT(:, 1, 2) + d{q}(:, 2) .* JIT(:, 2, 2);
        ATJ_22 = d{q}(:, 3) .* JIT(:, 1, 2) + d{q}(:, 4) .* JIT(:, 2, 2);

        JTATJ_11 = JIT(:, 1, 1) .* ATJ_11 + JIT(:, 2, 1) .* ATJ_21;
        JTATJ_12 = JIT(:, 1, 1) .* ATJ_12 + JIT(:, 2, 1) .* ATJ_22;
        JTATJ_21 = JIT(:, 1, 2) .* ATJ_11 + JIT(:, 2, 2) .* ATJ_21;
        JTATJ_22 = JIT(:, 1, 2) .* ATJ_12 + JIT(:, 2, 2) .* ATJ_22;

        res{q} = ...
            gradient_hat_phi_j(:, 1)' * gradient_hat_phi_i(:, 1) * JTATJ_11 + ...
            gradient_hat_phi_j(:, 1)' * gradient_hat_phi_i(:, 2) * JTATJ_12 + ...
            gradient_hat_phi_j(:, 2)' * gradient_hat_phi_i(:, 1) * JTATJ_21 + ...
            gradient_hat_phi_j(:, 2)' * gradient_hat_phi_i(:, 2) * JTATJ_22;
      end
    end

    if model.has_reaction
      % optional zeroth-order (reaction/mass) contribution:
      % phi_i * phi_j * r, added on top of the diffusion part

      hat_phi_i = evaluate_basis_function(df_info, x, i);
      hat_phi_j = evaluate_basis_function(df_info, x, j);

      r = cache_function(@(xarg, xmodel)xmodel.reaction(df_info.grid, ...
                                                        1:df_info.grid.nelements, ...
                                                        xarg, xmodel), ...
                         x, model);
      if ~iscell(r)
        res = res + hat_phi_i' * hat_phi_j * r;
      else
        % NOTE(review): assumes r and the earlier diffusivity decomposition
        % have matching lengths so res{q} exists for every q -- confirm
        for q = 1:length(r)

          res{q} = res{q} + hat_phi_i' * hat_phi_j * r{q};
        end
      end
    end

  else % nonlinear term
    % linearized convection term around the current velocity iterate uh:
    % phi_i * (uh . grad(phi_j)) with the gradient transformed by JIT

    hat_phi_i = evaluate_basis_function(df_info, x, i);
    gradient_hat_phi_j = evaluate_basis_function_derivative(df_info, x, j);
    % values of the current iterate on all elements at point x
    uh_vals = cache_function(@(xarg, xmodel)xmodel.uh.evaluate(':', xarg), ...
                             x, model);
    d = cache_function(@(xarg, xmodel)xmodel.divergence_tensor(df_info.grid, ...
                                                               1:df_info.grid.nelements, ...
                                                               xarg, ...
                                                               xmodel), x, model);
    JIT = df_info.grid.JIT;

    if ~iscell(d)
      % ATJ = D * JIT (per element, entrywise); same layout assumption as
      % for the diffusivity tensor above
      ATJ_11 = d(:, 1) .* JIT(:, 1, 1) + d(:, 2) .* JIT(:, 2, 1);
      ATJ_12 = d(:, 1) .* JIT(:, 1, 2) + d(:, 2) .* JIT(:, 2, 2);
      ATJ_21 = d(:, 3) .* JIT(:, 1, 1) + d(:, 4) .* JIT(:, 2, 1);
      ATJ_22 = d(:, 3) .* JIT(:, 1, 2) + d(:, 4) .* JIT(:, 2, 2);

      res = hat_phi_i' * gradient_hat_phi_j(:, 1) * (uh_vals(:, 1) .* ATJ_11) + ...
            hat_phi_i' * gradient_hat_phi_j(:, 2) * (uh_vals(:, 1) .* ATJ_12) + ...
            hat_phi_i' * gradient_hat_phi_j(:, 1) * (uh_vals(:, 2) .* ATJ_21) + ...
            hat_phi_i' * gradient_hat_phi_j(:, 2) * (uh_vals(:, 2) .* ATJ_22);
    else
      % restrict the decomposition to the components relevant for the
      % nonlinear matrix contribution
      d = d(model.matrix_nonlin_indices);

      res = cell(1,length(d));
      for q = 1:length(d)

        ATJ_11 = d{q}(:, 1) .* JIT(:, 1, 1) + d{q}(:, 2) .* JIT(:, 2, 1);
        ATJ_12 = d{q}(:, 1) .* JIT(:, 1, 2) + d{q}(:, 2) .* JIT(:, 2, 2);
        ATJ_21 = d{q}(:, 3) .* JIT(:, 1, 1) + d{q}(:, 4) .* JIT(:, 2, 1);
        ATJ_22 = d{q}(:, 3) .* JIT(:, 1, 2) + d{q}(:, 4) .* JIT(:, 2, 2);

        res{q} = hat_phi_i' * gradient_hat_phi_j(:, 1) * (uh_vals(:, 1) .* ATJ_11) + ...
                 hat_phi_i' * gradient_hat_phi_j(:, 2) * (uh_vals(:, 1) .* ATJ_12) + ...
                 hat_phi_i' * gradient_hat_phi_j(:, 1) * (uh_vals(:, 2) .* ATJ_21) + ...
                 hat_phi_i' * gradient_hat_phi_j(:, 2) * (uh_vals(:, 2) .* ATJ_22);
      end

    end
  end

elseif sum([i j] > vel_ndofs_per_element*2) == 1 && ~isfield(model, 'uh')
  % velocity-pressure coupling: exactly one of (i,j) is a pressure dof;
  % only assembled in the linear case (no current iterate present)

  if i > j
    % i is the pressure dof, j the velocity dof
    gradient_hat_phi = evaluate_basis_function_derivative(df_info, x, j);
    hat_phi = evaluate_basis_function(df_info, x, i);
  else
    % j is the pressure dof, i the velocity dof
    gradient_hat_phi = evaluate_basis_function_derivative(df_info, x, i);
    hat_phi = evaluate_basis_function(df_info, x, j);
  end

  d = cache_function(@(xarg, xmodel)xmodel.divergence_tensor(df_info.grid, ...
                                                             1:df_info.grid.nelements, ...
                                                             xarg, xmodel), ...
                     x, model);
  JIT = df_info.grid.JIT;

  if ~iscell(d)
    % DJIT = D * JIT (per element, entrywise)
    DJIT11 = d(:, 1) .* JIT(:, 1, 1) + d(:, 2) .* JIT(:, 2, 1);
    DJIT12 = d(:, 1) .* JIT(:, 1, 2) + d(:, 2) .* JIT(:, 2, 2);
    DJIT21 = d(:, 3) .* JIT(:, 1, 1) + d(:, 4) .* JIT(:, 2, 1);
    DJIT22 = d(:, 3) .* JIT(:, 1, 2) + d(:, 4) .* JIT(:, 2, 2);

    % -p * div(v) type coupling; hat_phi(3) picks a fixed entry of the
    % pressure basis evaluation -- assumes this is the pressure component
    % of the combined basis vector; TODO confirm against df_info layout
    res = -hat_phi(3) * ([DJIT11, DJIT21] * gradient_hat_phi(1:2, 1) + ...
                         [DJIT12, DJIT22] * gradient_hat_phi(1:2, 2));
  else

    res = cell(1, length(d));

    for q = 1:length(d)

      DJIT11 = d{q}(:, 1) .* JIT(:, 1, 1) + d{q}(:, 2) .* JIT(:, 2, 1);
      DJIT12 = d{q}(:, 1) .* JIT(:, 1, 2) + d{q}(:, 2) .* JIT(:, 2, 2);
      DJIT21 = d{q}(:, 3) .* JIT(:, 1, 1) + d{q}(:, 4) .* JIT(:, 2, 1);
      DJIT22 = d{q}(:, 3) .* JIT(:, 1, 2) + d{q}(:, 4) .* JIT(:, 2, 2);

      res{q} = -hat_phi(3) * ([DJIT11, DJIT21] * gradient_hat_phi(1:2, 1) + ...
                              [DJIT12, DJIT22] * gradient_hat_phi(1:2, 2));
    end

  end
else
  % all remaining index combinations (mixed velocity components,
  % pressure-pressure, or couplings suppressed in the nonlinear case)
  % contribute nothing
  res = [];
end

end
function varargout = cache_function(func_ptr, varargin)
simple caching of function call inputs are cached too!