Reference Guide  2.5.0
dynamo0p3.py
1 # -----------------------------------------------------------------------------
2 # BSD 3-Clause License
3 #
4 # Copyright (c) 2017-2024, Science and Technology Facilities Council.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 # POSSIBILITY OF SUCH DAMAGE.
33 # -----------------------------------------------------------------------------
34 # Authors R. W. Ford, A. R. Porter and S. Siso, STFC Daresbury Lab
35 # Modified I. Kavcic, A. Coughtrie, L. Turner and O. Brunt, Met Office
36 # Modified J. Henrichs, Bureau of Meteorology
37 # Modified A. B. G. Chalk and N. Nobre, STFC Daresbury Lab
38 
39 ''' This module implements the PSyclone Dynamo 0.3 API by 1)
40  specialising the required base classes in parser.py (KernelType) and
41  adding a new class (DynFuncDescriptor03) to capture function descriptor
42  metadata and 2) specialising the required base classes in psyGen.py
43  (PSy, Invokes, Invoke, InvokeSchedule, Loop, Kern, Inf, Arguments and
44  Argument). '''
45 
46 import os
47 from enum import Enum
48 from collections import OrderedDict, namedtuple
49 from dataclasses import dataclass
50 from typing import Any
51 
52 from psyclone import psyGen
53 from psyclone.configuration import Config
54 from psyclone.core import AccessType, Signature
55 from psyclone.domain.lfric.lfric_builtins import (LFRicBuiltInCallFactory,
56  LFRicBuiltIn)
57 from psyclone.domain.lfric import (FunctionSpace, KernCallAccArgList,
58  KernCallArgList,
59  LFRicCollection, LFRicConstants,
60  LFRicSymbolTable, LFRicKernCallFactory,
61  LFRicKern, LFRicInvokes, LFRicTypes,
62  LFRicLoop)
63 from psyclone.errors import GenerationError, InternalError, FieldNotFoundError
64 from psyclone.f2pygen import (AllocateGen, AssignGen, CallGen, CommentGen,
65  DeallocateGen, DeclGen, DoGen,
66  ModuleGen, TypeDeclGen, UseGen, PSyIRGen)
67 from psyclone.parse.kernel import getkerneldescriptors
68 from psyclone.parse.utils import ParseError
69 from psyclone.psyGen import (PSy, InvokeSchedule, Arguments,
70  KernelArgument, HaloExchange, GlobalSum,
71  DataAccess)
72 from psyclone.psyir.frontend.fortran import FortranReader
73 from psyclone.psyir.nodes import (
74  Reference, ACCEnterDataDirective, ScopingNode, ArrayOfStructuresReference,
75  StructureReference, Literal, IfBlock, Call, BinaryOperation, IntrinsicCall)
76 from psyclone.psyir.symbols import (INTEGER_TYPE, DataSymbol, ScalarType,
77  UnresolvedType, DataTypeSymbol,
78  ContainerSymbol, ImportInterface,
79  ArrayType, UnsupportedFortranType)
80 
81 
82 # pylint: disable=too-many-lines
83 # --------------------------------------------------------------------------- #
84 # ========== First section : Parser specialisations and classes ============= #
85 # --------------------------------------------------------------------------- #
86 #
87 
88 # ---------- Functions ------------------------------------------------------ #
89 
90 
def qr_basis_alloc_args(first_dim, basis_fn):
    '''
    Generate the list of dimensions required to allocate the
    supplied basis/diff-basis function.

    :param str first_dim: the variable name for the first dimension.
    :param basis_fn: dict holding details on the basis function
        we want to allocate.
    :type basis_fn: dict containing 'shape', 'fspace' and 'qr_var' keys
        holding the quadrature shape, FunctionSpace and name
        of the associated quadrature variable (as specified in the
        Algorithm layer), respectively.
    :return: list of dimensions to use to allocate array.
    :rtype: list of strings

    :raises InternalError: if an unrecognised quadrature shape is encountered.
    :raises NotImplementedError: if a valid quadrature shape is supplied for \
        which allocation is not yet supported.
    '''
    const = LFRicConstants()
    shape = basis_fn["shape"]
    if shape not in const.VALID_QUADRATURE_SHAPES:
        raise InternalError(
            f"Unrecognised shape ('{shape}') specified in "
            f"dynamo0p3.qr_basis_alloc_args(). Should be one of: "
            f"{const.VALID_QUADRATURE_SHAPES}")

    qr_var = "_" + basis_fn["qr_var"]

    # The dimensionality of the basis arrays depends on the type of
    # quadrature. The final dimensions for each supported shape are
    # tabulated here; shapes "gh_quadrature_xyz" and "gh_quadrature_xoyoz"
    # are recognised by the API but their allocation is not yet implemented.
    final_dims_by_shape = {
        "gh_quadrature_xyoz": ["np_xy" + qr_var, "np_z" + qr_var],
        "gh_quadrature_face": ["np_xyz" + qr_var, "nfaces" + qr_var],
        "gh_quadrature_edge": ["np_xyz" + qr_var, "nedges" + qr_var]}

    if shape not in final_dims_by_shape:
        raise NotImplementedError(
            f"Unrecognised shape '{shape}' specified in "
            f"dynamo0p3.qr_basis_alloc_args(). Should be one of: "
            f"{const.VALID_QUADRATURE_SHAPES}")

    # All shapes share the same two leading dimensions.
    return ([first_dim, basis_fn["fspace"].ndf_name] +
            final_dims_by_shape[shape])
144 
145 # ---------- Classes -------------------------------------------------------- #
146 
147 
class DynFuncDescriptor03():
    ''' The Dynamo 0.3 API includes a function-space descriptor as
    well as an argument descriptor which is not supported by the base
    classes. This class captures the information specified in a
    function-space descriptor.

    :param func_type: parsed metadata entry describing the basis/diff-basis \
        functions required on a function space.

    :raises ParseError: if the metadata entry is not named 'func_type'.
    :raises ParseError: if the metadata entry has fewer than two arguments.
    :raises ParseError: if the first argument is not a valid function-space \
        name.
    :raises ParseError: if a subsequent argument is not a valid metafunc \
        name or is specified more than once.

    '''

    def __init__(self, func_type):
        self._func_type = func_type
        if func_type.name != 'func_type':
            raise ParseError(
                f"In the dynamo0.3 API each meta_func entry must be of type "
                f"'func_type' but found '{func_type.name}'")
        if len(func_type.args) < 2:
            raise ParseError(
                f"In the dynamo0.3 API each meta_func entry must have at "
                f"least 2 args, but found {len(func_type.args)}")
        # The (ordered) list of operator names requested for the
        # function space.
        self._operator_names = []
        const = LFRicConstants()
        for idx, arg in enumerate(func_type.args):
            if idx == 0:  # first func_type arg
                if arg.name not in const.VALID_FUNCTION_SPACE_NAMES:
                    raise ParseError(
                        f"In the dynamo0p3 API the 1st argument of a "
                        f"meta_func entry should be a valid function space "
                        f"name (one of {const.VALID_FUNCTION_SPACE_NAMES}), "
                        f"but found '{arg.name}' in '{func_type}'")
                self._function_space_name = arg.name
            else:  # subsequent func_type args
                if arg.name not in const.VALID_METAFUNC_NAMES:
                    raise ParseError(
                        f"In the dynamo0.3 API, the 2nd argument and all "
                        f"subsequent arguments of a meta_func entry should "
                        f"be one of {const.VALID_METAFUNC_NAMES}, but found "
                        f"'{arg.name}' in '{func_type}'")
                if arg.name in self._operator_names:
                    raise ParseError(
                        f"In the dynamo0.3 API, it is an error to specify an "
                        f"operator name more than once in a meta_func entry, "
                        f"but '{arg.name}' is replicated in '{func_type}'")
                self._operator_names.append(arg.name)
        self._name = func_type.name

    @property
    def function_space_name(self):
        ''' Returns the name of the descriptors function space '''
        return self._function_space_name

    @property
    def operator_names(self):
        ''' Returns a list of operators that are associated with this
        descriptors function space '''
        return self._operator_names

    def __repr__(self):
        return f"DynFuncDescriptor03({self._func_type})"

    def __str__(self):
        res = "DynFuncDescriptor03 object" + os.linesep
        res += f"  name='{self._name}'" + os.linesep
        res += f"  nargs={len(self._operator_names)+1}" + os.linesep
        res += f"  function_space_name[{0}] = '{self._function_space_name}'" \
               + os.linesep
        for idx, arg in enumerate(self._operator_names):
            res += f"  operator_name[{idx+1}] = '{arg}'" + \
                   os.linesep
        return res
214 
215 
class RefElementMetaData():
    '''
    Class responsible for parsing reference-element metadata and storing
    the properties that a kernel requires.

    :param str kernel_name: name of the Kernel that the metadata is for.
    :param type_declns: list of fparser1 parse tree nodes representing type \
        declaration statements.
    :type type_declns: list of :py:class:`fparser.one.typedecl_statements.Type`

    :raises ParseError: if an unrecognised reference-element property is found.
    :raises ParseError: if a duplicate reference-element property is found.

    '''
    # pylint: disable=too-few-public-methods
    class Property(Enum):
        '''
        Enumeration of the various properties of the Reference Element
        (that a kernel can request). The names of each of these corresponds to
        the names that must be used in kernel metadata.

        '''
        NORMALS_TO_HORIZONTAL_FACES = 1
        NORMALS_TO_VERTICAL_FACES = 2
        NORMALS_TO_FACES = 3
        OUTWARD_NORMALS_TO_HORIZONTAL_FACES = 4
        OUTWARD_NORMALS_TO_VERTICAL_FACES = 5
        OUTWARD_NORMALS_TO_FACES = 6

    def __init__(self, kernel_name, type_declns):
        # The list of properties requested in the metadata (if any)
        self.properties = []

        re_properties = []
        # Search the supplied list of type declarations for the one
        # describing the reference-element properties required by the kernel.
        for line in type_declns:
            for entry in line.selector:
                if entry == "reference_element_data_type":
                    # getkerneldescriptors raises a ParseError if the named
                    # element cannot be found.
                    re_properties = getkerneldescriptors(
                        kernel_name, line, var_name="meta_reference_element",
                        var_type="reference_element_data_type")
                    break
            if re_properties:
                # Optimisation - stop searching if we've found a type
                # declaration for the reference-element data
                break
        try:
            # The metadata entry is a declaration of a Fortran array of type
            # reference_element_data_type. The initialisation of each member
            # of this array is done as a Fortran structure constructor, the
            # argument to which gives a property of the reference element.
            for re_prop in re_properties:
                for arg in re_prop.args:
                    self.properties.append(
                        self.Property[str(arg).upper()])
        except KeyError as err:
            # We found a reference-element property that we don't recognise.
            # Sort for consistency when testing.
            sorted_names = sorted([prop.name for prop in self.Property])
            raise ParseError(
                f"Unsupported reference-element property: '{arg}'. Supported "
                f"values are: {sorted_names}") from err

        # Check for duplicate properties
        for prop in self.properties:
            if self.properties.count(prop) > 1:
                raise ParseError(f"Duplicate reference-element property "
                                 f"found: '{prop}'.")
287 
288 
class MeshProperty(Enum):
    '''
    Enumeration of the various properties of the mesh that a kernel may
    require (either named in metadata or implicitly, depending on the type
    of kernel).

    '''
    # pylint: disable=too-few-public-methods
    # Indices of the faces adjacent to each cell (the only property that
    # may be requested explicitly in kernel metadata).
    ADJACENT_FACE = 1
    # Number of cells in a 2D horizontal slice, including halo cells
    # (added implicitly for kernels performing CMA operations).
    NCELL_2D = 2
    # Number of cells in a 2D horizontal slice, excluding halo cells
    # (added implicitly for kernels that operate on the 'domain').
    NCELL_2D_NO_HALOS = 3
300 
301 
class MeshPropertiesMetaData():
    '''
    Parses any mesh-property kernel metadata and stores the properties that
    a kernel requires.

    :param str kernel_name: name of the kernel that the metadata is for.
    :param type_declns: list of fparser1 parse tree nodes representing type \
        declaration statements.
    :type type_declns: list of :py:class:`fparser.one.typedecl_statements.Type`

    :raises ParseError: if an unrecognised mesh property is found.
    :raises ParseError: if a duplicate mesh property is found.

    '''
    # pylint: disable=too-few-public-methods
    # The properties that may be specified in kernel metadata are a subset
    # of the MeshProperty enumeration values.
    supported_properties = [MeshProperty.ADJACENT_FACE]

    def __init__(self, kernel_name, type_declns):
        # The list of mesh properties requested in the metadata.
        self.properties = []

        mesh_props = []
        # Search the supplied list of type declarations for the one
        # describing the mesh properties required by the kernel.
        for line in type_declns:
            for entry in line.selector:
                if entry == "mesh_data_type":
                    # getkerneldescriptors raises a ParseError if the named
                    # element cannot be found.
                    mesh_props = getkerneldescriptors(
                        kernel_name, line, var_name="meta_mesh",
                        var_type="mesh_data_type")
                    break
            if mesh_props:
                # Optimisation - stop searching if we've found a type
                # declaration for the mesh data
                break
        try:
            # The metadata entry is a declaration of a Fortran array of type
            # mesh_data_type. The initialisation of each member
            # of this array is done as a Fortran structure constructor, the
            # argument to which gives a mesh property.
            for prop in mesh_props:
                for arg in prop.args:
                    mesh_prop = MeshProperty[str(arg).upper()]
                    if mesh_prop not in self.supported_properties:
                        raise KeyError()
                    self.properties.append(mesh_prop)
        except KeyError as err:
            # We found a mesh property that we don't recognise or that
            # is not supported.
            supported_mesh_prop = [pr.name for pr in
                                   self.supported_properties]
            raise ParseError(f"Unsupported mesh property in metadata: "
                             f"'{arg}'. Supported values are: "
                             f"{supported_mesh_prop}") from err

        # Check for duplicate properties
        for prop in self.properties:
            if self.properties.count(prop) > 1:
                raise ParseError(f"Duplicate mesh property "
                                 f"found: '{prop}'.")
365 
366 # --------------------------------------------------------------------------- #
367 # ========== Second section : PSy specialisations =========================== #
368 # --------------------------------------------------------------------------- #
369 
370 # ---------- Classes -------------------------------------------------------- #
371 
372 
class DynamoPSy(PSy):
    '''
    The LFRic-specific PSy class. This creates an LFRic-specific
    Invokes object (which controls all the required invocation calls).
    It also overrides the PSy gen method so that we generate
    LFRic-specific PSy module code.

    :param invoke_info: object containing the required invocation information \
        for code optimisation and generation.
    :type invoke_info: :py:class:`psyclone.parse.algorithm.FileInfo`

    '''
    def __init__(self, invoke_info):
        # Make sure the scoping node creates LFRicSymbolTables
        # TODO #1954: Remove the protected access using a factory
        ScopingNode._symbol_table_class = LFRicSymbolTable
        PSy.__init__(self, invoke_info)
        self._invokes = LFRicInvokes(invoke_info.calls, self)
        # Initialise the dictionary that holds the names of the required
        # LFRic constants, data structures and data structure proxies for
        # the "use" statements in modules that contain PSy-layer routines.
        const = LFRicConstants()
        const_mod = const.UTILITIES_MOD_MAP["constants"]["module"]
        infmod_list = [const_mod]
        # Add all field and operator modules that might be used in the
        # algorithm layer. These do not appear in the code unless a
        # variable is added to the "only" part of the
        # '_infrastructure_modules' map.
        for data_type_info in const.DATA_TYPE_MAP.values():
            infmod_list.append(data_type_info["module"])

        # This also removes any duplicates from infmod_list
        self._infrastructure_modules = OrderedDict(
            (k, set()) for k in infmod_list)

        kind_names = set()

        # The infrastructure declares integer types with default
        # precision so always add this.
        api_config = Config.get().api_conf("dynamo0.3")
        kind_names.add(api_config.default_kind["integer"])

        # Datatypes declare precision information themselves. However,
        # that is not the case for literals. Therefore deal
        # with these separately here.
        for invoke in self.invokes.invoke_list:
            schedule = invoke.schedule
            for kernel in schedule.kernels():
                for arg in kernel.args:
                    if arg.is_literal:
                        kind_names.add(arg.precision)
        # Add precision names to the dictionary storing the required
        # LFRic constants.
        self._infrastructure_modules[const_mod] = kind_names

    @property
    def name(self):
        '''
        :returns: a name for the PSy layer. This is used as the PSy module \
            name. We override the default value as the Met Office \
            prefer "_psy" to be appended, rather than prepended.
        :rtype: str

        '''
        return self._name + "_psy"

    @property
    def orig_name(self):
        '''
        :returns: the unmodified PSy-layer name.
        :rtype: str

        '''
        return self._name

    @property
    def infrastructure_modules(self):
        '''
        :returns: the dictionary that holds the names of the required \
            LFRic infrastructure modules to create "use" \
            statements in the PSy-layer modules.
        :rtype: dict of set

        '''
        return self._infrastructure_modules

    @property
    def gen(self):
        '''
        Generate PSy code for the LFRic (Dynamo0.3) API.

        :returns: root node of generated Fortran AST.
        :rtype: :py:class:`psyir.nodes.Node`

        '''
        # Create an empty PSy layer module
        psy_module = ModuleGen(self.name)

        # If the container has a Routine that is not an InvokeSchedule
        # it should also be added to the generated module.
        for routine in self.container.children:
            if not isinstance(routine, InvokeSchedule):
                psy_module.add(PSyIRGen(psy_module, routine))

        # Add all invoke-specific information
        self.invokes.gen_code(psy_module)

        # Include required constants and infrastructure modules. The sets of
        # required LFRic data structures and their proxies are updated in
        # the relevant field and operator subclasses of LFRicCollection.
        # Here we sort the inputs in reverse order to have "_type" before
        # "_proxy_type" and "operator_" before "columnwise_operator_".
        # We also iterate through the dictionary in reverse order so the
        # "use" statements for field types are before the "use" statements
        # for operator types.
        for infmod in reversed(self._infrastructure_modules):
            if self._infrastructure_modules[infmod]:
                infmod_types = sorted(
                    list(self._infrastructure_modules[infmod]), reverse=True)
                psy_module.add(UseGen(psy_module, name=infmod,
                                      only=True, funcnames=infmod_types))

        # Return the root node of the generated code
        return psy_module.root
497 
498 
class LFRicMeshProperties(LFRicCollection):
    '''
    Holds all information on the the mesh properties required by either an
    invoke or a kernel stub. Note that the creation of a suitable mesh
    object is handled in the `DynMeshes` class. This class merely deals with
    extracting the necessary properties from that object and providing them to
    kernels.

    :param node: kernel or invoke for which to manage mesh properties.
    :type node: :py:class:`psyclone.domain.lfric.LFRicKern` or \
        :py:class:`psyclone.dynamo0p3.LFRicInvoke`

    '''
    def __init__(self, node):
        super().__init__(node)

        # The (ordered) list of mesh properties required by this invoke or
        # kernel stub.
        self._properties = []

        for call in self._calls:
            if call.mesh:
                self._properties += [prop for prop in call.mesh.properties
                                     if prop not in self._properties]
            # Kernels that operate on the 'domain' need the number of columns,
            # excluding those in the halo.
            if call.iterates_over == "domain":
                if MeshProperty.NCELL_2D_NO_HALOS not in self._properties:
                    self._properties.append(MeshProperty.NCELL_2D_NO_HALOS)
            # Kernels performing CMA operations need the number of columns,
            # including those in the halo.
            if call.cma_operation:
                if MeshProperty.NCELL_2D not in self._properties:
                    self._properties.append(MeshProperty.NCELL_2D)

        # Store properties in symbol table
        for prop in self._properties:
            name_lower = prop.name.lower()
            if prop.name in ["NCELL_2D", "NCELL_2D_NO_HALOS"]:
                # This is an integer:
                self._symbol_table.find_or_create_integer_symbol(
                    name_lower, tag=name_lower)
            else:
                # E.g.: adjacent_face
                self._symbol_table.find_or_create_array(
                    name_lower, 2, ScalarType.Intrinsic.INTEGER,
                    tag=name_lower)

    def kern_args(self, stub=False, var_accesses=None,
                  kern_call_arg_list=None):
        # pylint: disable=too-many-locals, too-many-branches
        '''
        Provides the list of kernel arguments associated with the mesh
        properties that the kernel requires. Optionally adds variable
        access information if var_accesses is given.

        :param bool stub: whether or not we are generating code for a \
            kernel stub.
        :param var_accesses: optional VariablesAccessInfo instance to store \
            the information about variable accesses.
        :type var_accesses: \
            :py:class:`psyclone.core.VariablesAccessInfo`
        :param kern_call_arg_list: an optional KernCallArgList instance \
            used to store PSyIR representation of the arguments.
        :type kern_call_arg_list: \
            Optional[:py:class:`psyclone.domain.lfric.KernCallArgList`]

        :returns: the kernel arguments associated with the mesh properties.
        :rtype: list of str

        :raises InternalError: if the class has been constructed for an \
            invoke rather than a single kernel call.
        :raises InternalError: if an unsupported mesh property is encountered.

        '''
        if not self._kernel:
            raise InternalError(
                "LFRicMeshProperties.kern_args() can only be called when "
                "LFRicMeshProperties has been instantiated for a kernel "
                "rather than an invoke.")

        arg_list = []

        for prop in self._properties:
            if prop == MeshProperty.ADJACENT_FACE:
                # Is this kernel already being passed the number of horizontal
                # faces of the reference element?
                has_nfaces = (
                    RefElementMetaData.Property.NORMALS_TO_HORIZONTAL_FACES
                    in self._kernel.reference_element.properties or
                    RefElementMetaData.Property.
                    OUTWARD_NORMALS_TO_HORIZONTAL_FACES
                    in self._kernel.reference_element.properties)
                if not has_nfaces:
                    if kern_call_arg_list:
                        sym = kern_call_arg_list.\
                            append_integer_reference("nfaces_re_h")
                        name = sym.name
                    else:
                        name = self._symbol_table.\
                            find_or_create_integer_symbol(
                                "nfaces_re_h", tag="nfaces_re_h").name
                    arg_list.append(name)
                    if var_accesses is not None:
                        var_accesses.add_access(Signature(name),
                                                AccessType.READ, self._kernel)

                adj_face = "adjacent_face"
                if not stub and kern_call_arg_list:
                    # Use the functionality in kern_call_arg_list to properly
                    # declare the symbol and to create a PSyIR reference for it
                    _, cell_ref = \
                        kern_call_arg_list.cell_ref_name(var_accesses)
                    adj_face_sym = kern_call_arg_list. \
                        append_array_reference(adj_face,
                                               [":", cell_ref],
                                               ScalarType.Intrinsic.INTEGER)
                    # Update the name in case there was a clash
                    adj_face = adj_face_sym.name
                    if var_accesses:
                        var_accesses.add_access(Signature(adj_face),
                                                AccessType.READ, self._kernel,
                                                [":", cell_ref])

                if not stub:
                    adj_face = self._symbol_table.find_or_create_tag(
                        "adjacent_face").name
                    cell_name = "cell"
                    if self._kernel.is_coloured():
                        colour_name = "colour"
                        cmap_name = self._symbol_table.find_or_create_tag(
                            "cmap", root_name="cmap").name
                        adj_face += (f"(:,{cmap_name}({colour_name},"
                                     f"{cell_name}))")
                    else:
                        adj_face += f"(:,{cell_name})"
                arg_list.append(adj_face)

                if var_accesses and not kern_call_arg_list:
                    # TODO #1320 Replace [1]
                    # The [1] just indicates that this variable is accessed
                    # as a rank 1 array. #1320 will improve this.
                    var_accesses.add_access(Signature(adj_face),
                                            AccessType.READ, self._kernel,
                                            [1])
            else:
                raise InternalError(
                    f"kern_args: found unsupported mesh property '{prop}' "
                    f"when generating arguments for kernel "
                    f"'{self._kernel.name}'. Only members of the MeshProperty "
                    f"Enum are permitted ({list(MeshProperty)}).")

        return arg_list

    def _invoke_declarations(self, parent):
        '''
        Creates the necessary declarations for variables needed in order to
        provide mesh properties to a kernel call.

        :param parent: node in the f2pygen AST to which to add declarations.
        :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`

        :raises InternalError: if this class has been instantiated for a \
            kernel instead of an invoke.
        :raises InternalError: if an unsupported mesh property is found.

        '''
        api_config = Config.get().api_conf("dynamo0.3")

        if not self._invoke:
            raise InternalError(
                "_invoke_declarations() cannot be called because "
                "LFRicMeshProperties has been instantiated for a kernel and "
                "not an invoke.")

        for prop in self._properties:
            # The DynMeshes class will have created a mesh object so we
            # don't need to do that here.
            if prop == MeshProperty.ADJACENT_FACE:
                adj_face = self._symbol_table.find_or_create_tag(
                    "adjacent_face").name + "(:,:) => null()"
                parent.add(DeclGen(parent, datatype="integer",
                                   kind=api_config.default_kind["integer"],
                                   pointer=True, entity_decls=[adj_face]))
            elif prop == MeshProperty.NCELL_2D_NO_HALOS:
                name = self._symbol_table.find_or_create_integer_symbol(
                    "ncell_2d_no_halos",
                    tag="ncell_2d_no_halos").name
                parent.add(DeclGen(parent, datatype="integer",
                                   kind=api_config.default_kind["integer"],
                                   entity_decls=[name]))
            elif prop == MeshProperty.NCELL_2D:
                name = self._symbol_table.find_or_create_integer_symbol(
                    "ncell_2d", tag="ncell_2d").name
                parent.add(DeclGen(parent, datatype="integer",
                                   kind=api_config.default_kind["integer"],
                                   entity_decls=[name]))
            else:
                raise InternalError(
                    f"Found unsupported mesh property '{prop}' when generating"
                    f" invoke declarations. Only members of the MeshProperty "
                    f"Enum are permitted ({list(MeshProperty)}).")

    def _stub_declarations(self, parent):
        '''
        Creates the necessary declarations for the variables needed in order
        to provide properties of the mesh in a kernel stub.

        :param parent: node in the f2pygen AST to which to add declarations.
        :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`

        :raises InternalError: if the class has been instantiated for an \
            invoke and not a kernel.
        :raises InternalError: if an unsupported mesh property is encountered.

        '''
        api_config = Config.get().api_conf("dynamo0.3")

        if not self._kernel:
            raise InternalError(
                "_stub_declarations() cannot be called because "
                "LFRicMeshProperties has been instantiated for an invoke and "
                "not a kernel.")

        for prop in self._properties:
            if prop == MeshProperty.ADJACENT_FACE:
                adj_face = self._symbol_table.find_or_create_array(
                    "adjacent_face", 2, ScalarType.Intrinsic.INTEGER,
                    tag="adjacent_face").name
                # 'nfaces_re_h' will have been declared by the
                # DynReferenceElement class.
                dimension = self._symbol_table.\
                    find_or_create_integer_symbol("nfaces_re_h",
                                                  tag="nfaces_re_h").name
                parent.add(
                    DeclGen(
                        parent, datatype="integer",
                        kind=api_config.default_kind["integer"],
                        dimension=dimension,
                        intent="in", entity_decls=[adj_face]))
            elif prop == MeshProperty.NCELL_2D:
                ncell_2d = self._symbol_table.find_or_create_integer_symbol(
                    "ncell_2d", tag="ncell_2d")
                parent.add(
                    DeclGen(parent, datatype="integer",
                            kind=api_config.default_kind["integer"],
                            intent="in", entity_decls=[ncell_2d.name]))
            else:
                raise InternalError(
                    f"Found unsupported mesh property '{prop}' when generating"
                    f" declarations for kernel stub. Only members of the "
                    f"MeshProperty Enum are permitted ({list(MeshProperty)})")

    def initialise(self, parent):
        '''
        Creates the f2pygen nodes for the initialisation of properties of
        the mesh.

        :param parent: node in the f2pygen tree to which to add statements.
        :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`

        :raises InternalError: if an unsupported mesh property is encountered.

        '''
        const = LFRicConstants()
        # Since colouring is applied via transformations, we have to check for
        # it now, rather than when this class was first constructed.
        need_colour_limits = False
        need_colour_halo_limits = False
        for call in self._calls:
            if call.is_coloured() and not call.is_intergrid:
                loop = call.parent.parent
                # Record whether or not this coloured loop accesses the halo.
                if loop.upper_bound_name in const.HALO_ACCESS_LOOP_BOUNDS:
                    need_colour_halo_limits = True
                else:
                    need_colour_limits = True

        if not self._properties and not (need_colour_limits or
                                         need_colour_halo_limits):
            # If no mesh properties are required and there's no colouring
            # (which requires a mesh object to lookup loop bounds) then we
            # need do nothing.
            return

        parent.add(CommentGen(parent, ""))
        parent.add(CommentGen(parent, " Initialise mesh properties"))
        parent.add(CommentGen(parent, ""))

        mesh = self._symbol_table.find_or_create_tag("mesh").name

        for prop in self._properties:
            if prop == MeshProperty.ADJACENT_FACE:
                adj_face = self._symbol_table.find_or_create_tag(
                    "adjacent_face").name
                parent.add(AssignGen(parent, pointer=True, lhs=adj_face,
                                     rhs=mesh+"%get_adjacent_face()"))

            elif prop == MeshProperty.NCELL_2D_NO_HALOS:
                name = self._symbol_table.find_or_create_integer_symbol(
                    "ncell_2d_no_halos", tag="ncell_2d_no_halos").name
                parent.add(AssignGen(parent, lhs=name,
                                     rhs=mesh+"%get_last_edge_cell()"))

            elif prop == MeshProperty.NCELL_2D:
                name = self._symbol_table.find_or_create_integer_symbol(
                    "ncell_2d", tag="ncell_2d").name
                parent.add(AssignGen(parent, lhs=name,
                                     rhs=mesh+"%get_ncells_2d()"))
            else:
                raise InternalError(
                    f"Found unsupported mesh property '{str(prop)}' when "
                    f"generating initialisation code. Only members of the "
                    f"MeshProperty Enum are permitted ({list(MeshProperty)})")

        if need_colour_halo_limits:
            lhs = self._symbol_table.find_or_create_tag(
                "last_halo_cell_all_colours").name
            rhs = f"{mesh}%get_last_halo_cell_all_colours()"
            parent.add(AssignGen(parent, lhs=lhs, rhs=rhs))
        if need_colour_limits:
            lhs = self._symbol_table.find_or_create_tag(
                "last_edge_cell_all_colours").name
            rhs = f"{mesh}%get_last_edge_cell_all_colours()"
            parent.add(AssignGen(parent, lhs=lhs, rhs=rhs))
824 
825 
827  '''
828  Holds all information on the properties of the Reference Element
829  required by an Invoke or a Kernel stub.
830 
831  :param node: Kernel or Invoke for which to manage Reference-Element \
832  properties.
833  :type node: :py:class:`psyclone.domain.lfric.LFRicKern` or \
834  :py:class:`psyclone.dynamo0p3.LFRicInvoke`
835 
836  :raises InternalError: if an unsupported reference-element property \
837  is encountered.
838 
839  '''
840  # pylint: disable=too-many-instance-attributes
841  def __init__(self, node):
842  # pylint: disable=too-many-branches, too-many-statements
843  super().__init__(node)
844 
845  # Create a union of the reference-element properties required by all
846  # kernels in this invoke. Use a list to preserve the order in the
847  # kernel metadata (in the case of a kernel stub) and remove duplicate
848  # entries by using OrderedDict.
849  self._properties_properties = []
850  self._nfaces_h_required_nfaces_h_required = False
851 
852  for call in self._calls_calls:
853  if call.reference_element:
854  self._properties_properties.extend(call.reference_element.properties)
855  if call.mesh and call.mesh.properties:
856  # If a kernel requires a property of the mesh then it will
857  # also require the number of horizontal faces of the
858  # reference element.
859  self._nfaces_h_required_nfaces_h_required = True
860 
861  if not (self._properties_properties or self._nfaces_h_required_nfaces_h_required):
862  return
863 
864  if self._properties_properties:
865  self._properties_properties = list(OrderedDict.fromkeys(self._properties_properties))
866 
867  symtab = self._symbol_table_symbol_table
868 
869  # Create and store a name for the reference element object
870  self._ref_elem_name_ref_elem_name = \
871  symtab.find_or_create_tag("reference_element").name
872 
873  # Initialise names for the properties of the reference element object:
874  # Number of horizontal/vertical/all faces,
875  self._nfaces_h_symbol_nfaces_h_symbol = None
876  self._nfaces_v_symbol_nfaces_v_symbol = None
877  self._nfaces_symbol_nfaces_symbol = None
878  # Horizontal normals to faces,
879  self._horiz_face_normals_symbol_horiz_face_normals_symbol = None
880  self._horiz_face_out_normals_symbol_horiz_face_out_normals_symbol = None
881  # Vertical normals to faces,
882  self._vert_face_normals_symbol_vert_face_normals_symbol = None
883  self._vert_face_out_normals_symbol_vert_face_out_normals_symbol = None
884  # All normals to faces.
885  self._face_normals_symbol_face_normals_symbol = None
886  self._face_out_normals_symbol_face_out_normals_symbol = None
887 
888  # Store argument properties for kernel calls and stub declarations
889  # and argument list
890  self._arg_properties_arg_properties = OrderedDict()
891 
892  # Populate and check reference element properties
893  # Provide no. of horizontal faces if required
894  if (RefElementMetaData.Property.NORMALS_TO_HORIZONTAL_FACES
895  in self._properties_properties or
896  RefElementMetaData.Property.OUTWARD_NORMALS_TO_HORIZONTAL_FACES
897  in self._properties_properties or
898  self._nfaces_h_required_nfaces_h_required):
899  self._nfaces_h_symbol_nfaces_h_symbol = symtab.find_or_create_integer_symbol(
900  "nfaces_re_h", tag="nfaces_re_h")
901  # Provide no. of vertical faces if required
902  if (RefElementMetaData.Property.NORMALS_TO_VERTICAL_FACES
903  in self._properties_properties or
904  RefElementMetaData.Property.OUTWARD_NORMALS_TO_VERTICAL_FACES
905  in self._properties_properties):
906  self._nfaces_v_symbol_nfaces_v_symbol = symtab.find_or_create_integer_symbol(
907  "nfaces_re_v", tag="nfaces_re_v")
908  # Provide no. of all faces if required
909  if (RefElementMetaData.Property.NORMALS_TO_FACES
910  in self._properties_properties or
911  RefElementMetaData.Property.OUTWARD_NORMALS_TO_FACES
912  in self._properties_properties):
913  self._nfaces_symbol_nfaces_symbol = symtab.find_or_create_integer_symbol(
914  "nfaces_re", tag="nfaces_re")
915 
916  # Now the arrays themselves, in the order specified in the
917  # kernel metadata (in the case of a kernel stub)
918  for prop in self._properties_properties:
919  # Provide horizontal normals to faces
920  if prop == RefElementMetaData.Property.NORMALS_TO_HORIZONTAL_FACES:
921  name = "normals_to_horiz_faces"
922  self._horiz_face_normals_symbol_horiz_face_normals_symbol = \
923  symtab.find_or_create_array(name, 2,
924  ScalarType.Intrinsic.REAL,
925  tag=name)
926  if self._horiz_face_normals_symbol_horiz_face_normals_symbol not in self._arg_properties_arg_properties:
927  self._arg_properties_arg_properties[self._horiz_face_normals_symbol_horiz_face_normals_symbol] = \
928  self._nfaces_h_symbol_nfaces_h_symbol
929  # Provide horizontal normals to "outward" faces
930  elif prop == (RefElementMetaData.Property.
931  OUTWARD_NORMALS_TO_HORIZONTAL_FACES):
932  name = "out_normals_to_horiz_faces"
933  self._horiz_face_out_normals_symbol_horiz_face_out_normals_symbol = \
934  symtab.find_or_create_array(name, 2,
935  ScalarType.Intrinsic.REAL,
936  tag=name)
937  if self._horiz_face_out_normals_symbol_horiz_face_out_normals_symbol not in \
938  self._arg_properties_arg_properties:
939  self._arg_properties_arg_properties[self._horiz_face_out_normals_symbol_horiz_face_out_normals_symbol] \
940  = self._nfaces_h_symbol_nfaces_h_symbol
941  elif prop == (RefElementMetaData.Property.
942  NORMALS_TO_VERTICAL_FACES):
943  name = "normals_to_vert_faces"
944  self._vert_face_normals_symbol_vert_face_normals_symbol = \
945  symtab.find_or_create_array(name, 2,
946  ScalarType.Intrinsic.REAL,
947  tag=name)
948  if self._vert_face_normals_symbol_vert_face_normals_symbol not in self._arg_properties_arg_properties:
949  self._arg_properties_arg_properties[self._vert_face_normals_symbol_vert_face_normals_symbol] = \
950  self._nfaces_v_symbol_nfaces_v_symbol
951  # Provide vertical normals to "outward" faces
952  elif prop == (RefElementMetaData.Property.
953  OUTWARD_NORMALS_TO_VERTICAL_FACES):
954  name = "out_normals_to_vert_faces"
955  self._vert_face_out_normals_symbol_vert_face_out_normals_symbol = \
956  symtab.find_or_create_array(name, 2,
957  ScalarType.Intrinsic.REAL,
958  tag=name)
959  if self._vert_face_out_normals_symbol_vert_face_out_normals_symbol not in \
960  self._arg_properties_arg_properties:
961  self._arg_properties_arg_properties[self._vert_face_out_normals_symbol_vert_face_out_normals_symbol] \
962  = self._nfaces_v_symbol_nfaces_v_symbol
963  # Provide normals to all faces
964  elif prop == RefElementMetaData.Property.NORMALS_TO_FACES:
965  name = "normals_to_faces"
966  self._face_normals_symbol_face_normals_symbol = \
967  symtab.find_or_create_array(name, 2,
968  ScalarType.Intrinsic.REAL,
969  tag=name)
970  if self._face_normals_symbol_face_normals_symbol not in self._arg_properties_arg_properties:
971  self._arg_properties_arg_properties[self._face_normals_symbol_face_normals_symbol] = \
972  self._nfaces_symbol_nfaces_symbol
973  # Provide vertical normals to all "outward" faces
974  elif prop == RefElementMetaData.Property.OUTWARD_NORMALS_TO_FACES:
975  name = "out_normals_to_faces"
976  self._face_out_normals_symbol_face_out_normals_symbol = \
977  symtab.find_or_create_array(name, 2,
978  ScalarType.Intrinsic.REAL,
979  tag=name)
980  if self._face_out_normals_symbol_face_out_normals_symbol not in \
981  self._arg_properties_arg_properties:
982  self._arg_properties_arg_properties[self._face_out_normals_symbol_face_out_normals_symbol] = \
983  self._nfaces_symbol_nfaces_symbol
984  else:
985  all_props = [str(sprop)
986  for sprop in RefElementMetaData.Property]
987  raise InternalError(
988  f"Unsupported reference-element property ('{prop}') "
989  f"found when generating arguments for kernel "
990  f"'{self._kernel.name}'. Supported properties are: "
991  f"{all_props}")
992 
993  def kern_args(self):
994  '''
995  :returns: the argument list for kernel call/stub arguments.
996  :rtype: List[str]
997 
998  '''
999  argdict = self._arg_properties_arg_properties
1000  # Remove duplicate "nfaces" by using OrderedDict
1001  nfaces = list(OrderedDict.fromkeys(argdict.values()))
1002  kern_args = nfaces + list(argdict.keys())
1003  return [sym.name for sym in kern_args]
1004 
1006  '''
1007  :returns: the argument symbol list for kernel call/stub arguments.
1008  :rtype: List[:py:class:`psyclone.psyir.symbols.Symbol`]
1009 
1010  '''
1011  argdict = self._arg_properties_arg_properties
1012  # Remove duplicate "nfaces" by using OrderedDict
1013  nfaces = list(OrderedDict.fromkeys(argdict.values()))
1014  return nfaces + list(argdict.keys())
1015 
1016  def _invoke_declarations(self, parent):
1017  '''
1018  Create the necessary declarations for the variables needed in order
1019  to provide properties of the reference element in a Kernel call.
1020 
1021  :param parent: node in the f2pygen AST to which to add declarations.
1022  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1023 
1024  '''
1025  # Get the list of the required scalars
1026  if self._properties_properties:
1027  # remove duplicates with an OrderedDict
1028  nface_vars = list(OrderedDict.fromkeys(
1029  self._arg_properties_arg_properties.values()))
1030  elif self._nfaces_h_required_nfaces_h_required:
1031  # We only need the number of 'horizontal' faces
1032  nface_vars = [self._nfaces_h_symbol_nfaces_h_symbol]
1033  else:
1034  # No reference-element properties required
1035  return
1036 
1037  api_config = Config.get().api_conf("dynamo0.3")
1038  const = LFRicConstants()
1039 
1040  refelem_type = const.REFELEMENT_TYPE_MAP["refelement"]["type"]
1041  refelem_mod = const.REFELEMENT_TYPE_MAP["refelement"]["module"]
1042  parent.add(UseGen(parent, name=refelem_mod, only=True,
1043  funcnames=[refelem_type]))
1044  parent.add(
1045  TypeDeclGen(parent, pointer=True, is_class=True,
1046  datatype=refelem_type,
1047  entity_decls=[self._ref_elem_name_ref_elem_name + " => null()"]))
1048 
1049  parent.add(DeclGen(parent, datatype="integer",
1050  kind=api_config.default_kind["integer"],
1051  entity_decls=[var.name for var in nface_vars]))
1052 
1053  if not self._properties_properties:
1054  # We only need the number of horizontal faces so we're done
1055  return
1056 
1057  # Declare the necessary arrays
1058  array_decls = [f"{sym.name}(:,:)"
1059  for sym in self._arg_properties_arg_properties.keys()]
1060  my_kind = api_config.default_kind["real"]
1061  parent.add(DeclGen(parent, datatype="real", kind=my_kind,
1062  allocatable=True, entity_decls=array_decls))
1063  # Ensure the necessary kind parameter is imported.
1064  const_mod = const.UTILITIES_MOD_MAP["constants"]["module"]
1065  const_mod_uses = self._invoke_invoke.invokes.psy.infrastructure_modules[
1066  const_mod]
1067  const_mod_uses.add(my_kind)
1068 
1069  def _stub_declarations(self, parent):
1070  '''
1071  Create the necessary declarations for the variables needed in order
1072  to provide properties of the reference element in a Kernel stub.
1073 
1074  :param parent: node in the f2pygen AST to which to add declarations.
1075  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1076 
1077  '''
1078  api_config = Config.get().api_conf("dynamo0.3")
1079 
1080  if not (self._properties_properties or self._nfaces_h_required_nfaces_h_required):
1081  return
1082 
1083  # Declare the necessary scalars (duplicates are ignored by parent.add)
1084  scalars = list(self._arg_properties_arg_properties.values())
1085  nfaces_h = self._symbol_table_symbol_table.find_or_create_integer_symbol(
1086  "nfaces_re_h", tag="nfaces_re_h")
1087  if self._nfaces_h_required_nfaces_h_required and nfaces_h not in scalars:
1088  scalars.append(nfaces_h)
1089 
1090  for nface in scalars:
1091  parent.add(DeclGen(parent, datatype="integer",
1092  kind=api_config.default_kind["integer"],
1093  intent="in", entity_decls=[nface.name]))
1094 
1095  # Declare the necessary arrays
1096  for arr, sym in self._arg_properties_arg_properties.items():
1097  dimension = f"3,{sym.name}"
1098  parent.add(DeclGen(parent, datatype="real",
1099  kind=api_config.default_kind["real"],
1100  intent="in", dimension=dimension,
1101  entity_decls=[arr.name]))
1102 
1103  def initialise(self, parent):
1104  '''
1105  Creates the f2pygen nodes representing the necessary initialisation
1106  code for properties of the reference element.
1107 
1108  :param parent: node in the f2pygen tree to which to add statements.
1109  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1110 
1111  '''
1112  if not (self._properties_properties or self._nfaces_h_required_nfaces_h_required):
1113  return
1114 
1115  parent.add(CommentGen(parent, ""))
1116  parent.add(
1117  CommentGen(parent,
1118  " Get the reference element and query its properties"))
1119  parent.add(CommentGen(parent, ""))
1120 
1121  mesh_obj_name = self._symbol_table_symbol_table.find_or_create_tag("mesh").name
1122  parent.add(AssignGen(parent, pointer=True, lhs=self._ref_elem_name_ref_elem_name,
1123  rhs=mesh_obj_name+"%get_reference_element()"))
1124 
1125  if self._nfaces_h_symbol_nfaces_h_symbol:
1126  parent.add(
1127  AssignGen(parent, lhs=self._nfaces_h_symbol_nfaces_h_symbol.name,
1128  rhs=self._ref_elem_name_ref_elem_name +
1129  "%get_number_horizontal_faces()"))
1130  if self._nfaces_v_symbol_nfaces_v_symbol:
1131  parent.add(
1132  AssignGen(
1133  parent, lhs=self._nfaces_v_symbol_nfaces_v_symbol.name,
1134  rhs=self._ref_elem_name_ref_elem_name + "%get_number_vertical_faces()"))
1135 
1136  if self._nfaces_symbol_nfaces_symbol:
1137  parent.add(
1138  AssignGen(
1139  parent, lhs=self._nfaces_symbol_nfaces_symbol.name,
1140  rhs=self._ref_elem_name_ref_elem_name + "%get_number_faces()"))
1141 
1142  if self._horiz_face_normals_symbol_horiz_face_normals_symbol:
1143  parent.add(
1144  CallGen(parent,
1145  name=f"{self._ref_elem_name}%get_normals_to_"
1146  f"horizontal_faces("
1147  f"{self._horiz_face_normals_symbol.name})"))
1148 
1149  if self._horiz_face_out_normals_symbol_horiz_face_out_normals_symbol:
1150  parent.add(
1151  CallGen(
1152  parent,
1153  name=f"{self._ref_elem_name}%get_outward_normals_to_"
1154  f"horizontal_faces("
1155  f"{self._horiz_face_out_normals_symbol.name})"))
1156 
1157  if self._vert_face_normals_symbol_vert_face_normals_symbol:
1158  parent.add(
1159  CallGen(parent,
1160  name=f"{self._ref_elem_name}%get_normals_to_vertical_"
1161  f"faces({self._vert_face_normals_symbol.name})"))
1162 
1163  if self._vert_face_out_normals_symbol_vert_face_out_normals_symbol:
1164  parent.add(
1165  CallGen(
1166  parent,
1167  name=f"{self._ref_elem_name}%get_outward_normals_to_"
1168  f"vertical_faces"
1169  f"({self._vert_face_out_normals_symbol.name})"))
1170 
1171  if self._face_normals_symbol_face_normals_symbol:
1172  parent.add(
1173  CallGen(parent,
1174  name=f"{self._ref_elem_name}%get_normals_to_faces"
1175  f"({self._face_normals_symbol.name})"))
1176 
1177  if self._face_out_normals_symbol_face_out_normals_symbol:
1178  parent.add(
1179  CallGen(
1180  parent,
1181  name=f"{self._ref_elem_name}%get_outward_normals_to_"
1182  f"faces({self._face_out_normals_symbol.name})"))
1183 
1184 
1186  '''
1187  Handles the declaration and initialisation of all function-space-related
1188  quantities required by an Invoke.
1189 
1190  :param invoke: the Invoke or Kernel object.
1191  '''
1192  def __init__(self, kern_or_invoke):
1193  super().__init__(kern_or_invoke)
1194 
1195  if self._invoke_invoke:
1196  self._function_spaces_function_spaces = self._invoke_invoke.unique_fss()[:]
1197  else:
1198  self._function_spaces_function_spaces = self._calls_calls[0].arguments.unique_fss
1199 
1200  self._var_list_var_list = []
1201 
1202  # Loop over all unique function spaces used by our kernel(s)
1203  for function_space in self._function_spaces_function_spaces:
1204 
1205  # We need ndf for a space if a kernel operates on cell-columns,
1206  # has a field or operator on that space and is not a
1207  # CMA kernel performing a matrix-matrix operation.
1208  if self._invoke_invoke and not self._dofs_only_dofs_only or \
1209  self._kernel_kernel and self._kernel_kernel.cma_operation != "matrix-matrix":
1210  self._var_list_var_list.append(function_space.ndf_name)
1211 
1212  # If there is a field on this space then add undf to list
1213  # to declare later. However, if the invoke contains only
1214  # kernels that operate on dofs and distributed memory is
1215  # enabled then the number of dofs is obtained from the
1216  # field proxy and undf is not required.
1217  if self._invoke_invoke and self._invoke_invoke.field_on_space(function_space):
1218  if not (self._dofs_only_dofs_only and Config.get().distributed_memory):
1219  self._var_list_var_list.append(function_space.undf_name)
1220  elif self._kernel_kernel and \
1221  function_space.field_on_space(self._kernel_kernel.arguments):
1222  self._var_list_var_list.append(function_space.undf_name)
1223 
1224  def _stub_declarations(self, parent):
1225  '''
1226  Add function-space-related declarations to a Kernel stub.
1227 
1228  :param parent: the node in the f2pygen AST representing the kernel \
1229  stub to which to add declarations.
1230  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1231 
1232  '''
1233  api_config = Config.get().api_conf("dynamo0.3")
1234 
1235  if self._var_list_var_list:
1236  # Declare ndf and undf for all function spaces
1237  parent.add(DeclGen(parent, datatype="integer",
1238  kind=api_config.default_kind["integer"],
1239  intent="in", entity_decls=self._var_list_var_list))
1240 
1241  def _invoke_declarations(self, parent):
1242  '''
1243  Add function-space-related declarations to a PSy-layer routine.
1244 
1245  :param parent: the node in the f2pygen AST to which to add \
1246  declarations.
1247  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1248 
1249  '''
1250  api_config = Config.get().api_conf("dynamo0.3")
1251 
1252  if self._var_list_var_list:
1253  # Declare ndf and undf for all function spaces
1254  parent.add(DeclGen(parent, datatype="integer",
1255  kind=api_config.default_kind["integer"],
1256  entity_decls=self._var_list_var_list))
1257 
1258  def initialise(self, parent):
1259  '''
1260  Create the code that initialises function-space quantities.
1261 
1262  :param parent: the node in the f2pygen AST representing the PSy-layer \
1263  routine.
1264  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1265 
1266  '''
1267  # Loop over all unique function spaces used by the kernels in
1268  # the invoke
1269  for function_space in self._function_spaces_function_spaces:
1270  # Initialise information associated with this function space.
1271  # If we have 1+ kernels that operate on cell-columns then we
1272  # will need ndf and undf. If we don't then we only need undf
1273  # (for the upper bound of the loop over dofs) if we're not
1274  # doing DM.
1275  if not (self._dofs_only_dofs_only and Config.get().distributed_memory):
1276  parent.add(CommentGen(parent, ""))
1277  parent.add(CommentGen(parent,
1278  " Initialise number of DoFs for " +
1279  function_space.mangled_name))
1280  parent.add(CommentGen(parent, ""))
1281 
1282  # Find argument proxy name used to dereference the argument
1283  arg = self._invoke_invoke.arg_for_funcspace(function_space)
1284  name = arg.proxy_name_indexed
1285  # Initialise ndf for this function space.
1286  if not self._dofs_only_dofs_only:
1287  ndf_name = function_space.ndf_name
1288  parent.add(AssignGen(parent, lhs=ndf_name,
1289  rhs=name +
1290  "%" + arg.ref_name(function_space) +
1291  "%get_ndf()"))
1292  # If there is a field on this space then initialise undf
1293  # for this function space. However, if the invoke contains
1294  # only kernels that operate on dofs and distributed
1295  # memory is enabled then the number of dofs is obtained
1296  # from the field proxy and undf is not required.
1297  if not (self._dofs_only_dofs_only and Config.get().distributed_memory):
1298  if self._invoke_invoke.field_on_space(function_space):
1299  undf_name = function_space.undf_name
1300  parent.add(AssignGen(parent, lhs=undf_name,
1301  rhs=name + "%" +
1302  arg.ref_name(function_space) +
1303  "%get_undf()"))
1304 
1305 
1307  '''
1308  Handles all proxy-related declarations and initialisation. Unlike other
1309  sub-classes of LFRicCollection, we do not have to handle Kernel-stub
1310  generation since Kernels know nothing about proxies.
1311 
1312  An instance of this class is instantiated for each Invoke before the
1313  PSy Layer is constructed. For each unique field or operator argument to
1314  a kernel in the Invoke it:
1315 
1316  * Creates a DataSymbol for the corresponding proxy;
1317  * Creates a DataSymbol for the pointer to the data array accessed via
1318  the proxy. If the argument is a field vector then a DataSymbol is
1319  created for each component of the vector;
1320  * Tags that DataSymbol so that the correct symbol can always be looked
1321  up, irrespective of any name clashes;
1322 
1323  Note that since the Fortran standard forbids (Note 12.34 in the
1324  Fortran2008 standard) aliasing of effective arguments that are written to,
1325  the set of unique kernel arguments must refer to unique memory locations
1326  or to those that are read only.
1327 
1328  '''
1329  def __init__(self, node):
1330  super().__init__(node)
1331  const = LFRicConstants()
1332  real_field_args = self._invoke_invoke.unique_declarations(
1333  argument_types=const.VALID_FIELD_NAMES,
1334  intrinsic_type=const.MAPPING_DATA_TYPES["gh_real"])
1335  int_field_args = self._invoke_invoke.unique_declarations(
1336  argument_types=const.VALID_FIELD_NAMES,
1337  intrinsic_type=const.MAPPING_DATA_TYPES["gh_integer"])
1338  op_args = self._invoke_invoke.unique_declarations(
1339  argument_types=const.VALID_OPERATOR_NAMES)
1340 
1341  # We put precision Symbols in the Container symbol table.
1342  ctable = self._invoke_invoke.schedule.parent.symbol_table
1343 
1344  for arg in real_field_args + int_field_args + op_args:
1345  # Create symbols that we will associate with the internal
1346  # data arrays of fields, field vectors and LMA operators.
1347  if arg.argument_type == "gh_columnwise_operator":
1348  # CMA operators are handled by the DynCMAOperators class.
1349  continue
1350  ctable.add_lfric_precision_symbol(arg.precision)
1351  intrinsic_type = "integer" if arg in int_field_args else "real"
1352  suffix = const.ARG_TYPE_SUFFIX_MAPPING[arg.argument_type]
1353  if arg.vector_size > 1:
1354  for idx in range(1, arg.vector_size+1):
1355  # Make sure we're going to create a Symbol with a unique
1356  # name.
1357  new_name = self._symbol_table_symbol_table.next_available_name(
1358  f"{arg.name}_{idx}_{suffix}")
1359  tag = f"{arg.name}_{idx}:{suffix}"
1360  # The data for a field lives in a rank-1 array.
1361  self._add_symbol_add_symbol(new_name, tag, intrinsic_type, arg, 1)
1362  else:
1363  # Make sure we're going to create a Symbol with a unique
1364  # name (since this is hardwired into the
1365  # UnsupportedFortranType).
1366  new_name = self._symbol_table_symbol_table.next_available_name(
1367  f"{arg.name}_{suffix}")
1368  tag = f"{arg.name}:{suffix}"
1369  # The data for an operator lives in a rank-3 array.
1370  rank = 1 if arg not in op_args else 3
1371  self._add_symbol_add_symbol(new_name, tag, intrinsic_type, arg, rank)
1372 
1373  def _add_symbol(self, name, tag, intrinsic_type, arg, rank):
1374  '''
1375  Creates a new DataSymbol representing either an LFRic field or
1376  operator and adds it to the SymbolTable associated with this class.
1377  The Symbol is of UnsupportedFortranType because it is a pointer
1378  to the internal data array and the PSyIR does not support pointers. The
1379  remainder of the type information is fully supplied in the
1380  `partial_datatype` property of the UnsupportedFortranType.
1381  The supplied Symbol name is assumed not to already exist in the
1382  SymbolTable (e.g. it is obtained with the `next_available_name` method
1383  of SymbolTable) because it is used in constructing the
1384  UnsupportedFortranType which must be done before the Symbol is created.
1385 
1386  :param str name: the name of the new Symbol.
1387  :param str tag: the tag to associate with the new Symbol.
1388  :param str intrinsic_type: whether the Symbol represents "real" or
1389  "integer" data.
1390  :param arg: the metadata description of the associated kernel argument.
1391  :type arg: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
1392  :param int rank: the rank of the array represented by the Symbol.
1393 
1394  '''
1395  if intrinsic_type == "real":
1396  lfric_type = "LFRicRealScalarDataType"
1397  else:
1398  lfric_type = "LFRicIntegerScalarDataType"
1399  precision = LFRicConstants().precision_for_type(arg.data_type)
1400  array_type = ArrayType(
1401  LFRicTypes(lfric_type)(precision),
1402  [ArrayType.Extent.DEFERRED]*rank)
1403 
1404  # Since the PSyIR doesn't have the pointer concept, we have
1405  # to have an UnsupportedFortranType.
1406  index_str = ",".join(rank*[":"])
1407  dtype = UnsupportedFortranType(
1408  f"{intrinsic_type}(kind={arg.precision}), pointer, "
1409  f"dimension({index_str}) :: {name} => null()",
1410  partial_datatype=array_type)
1411  try:
1412  self._symbol_table_symbol_table.new_symbol(name,
1413  symbol_type=DataSymbol,
1414  datatype=dtype,
1415  tag=tag)
1416  except KeyError:
1417  # The tag already exists and therefore we don't need to do
1418  # anything. This can happen if the Symbol Table has already
1419  # been populated by a previous call to this constructor. Even if
1420  # this is not the case, within a single Invoke we can have user-
1421  # supplied kernels that accept a full field-vector as argument
1422  # but also individual components of that vector might
1423  # be passed to Builtins. Therefore a clash with an
1424  # existing tag may occur which we can safely ignore.
1425  pass
1426 
1427  def _invoke_declarations(self, parent):
1428  '''
1429  Insert declarations of all proxy-related quantities into the PSy layer.
1430 
1431  :param parent: the node in the f2pygen AST representing the PSy- \
1432  layer routine.
1433  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1434 
1435  '''
1436  const = LFRicConstants()
1437  const_mod = const.UTILITIES_MOD_MAP["constants"]["module"]
1438  table = self._symbol_table_symbol_table
1439 
1440  # Declarations of real and integer field proxies
1441 
1442  # Filter field arguments by intrinsic type
1443  real_field_args = self._invoke_invoke.unique_declarations(
1444  argument_types=const.VALID_FIELD_NAMES,
1445  intrinsic_type=const.MAPPING_DATA_TYPES["gh_real"])
1446  int_field_args = self._invoke_invoke.unique_declarations(
1447  argument_types=const.VALID_FIELD_NAMES,
1448  intrinsic_type=const.MAPPING_DATA_TYPES["gh_integer"])
1449 
1450  # Create a field argument map that splits the (real and
1451  # integer) fields into their different datatypes for their
1452  # proxy's
1453  field_datatype_map = OrderedDict()
1454  for arg in real_field_args + int_field_args:
1455  try:
1456  field_datatype_map[
1457  (arg.proxy_data_type, arg.module_name)].append(arg)
1458  except KeyError:
1459  # This datatype has not been seen before so create a
1460  # new entry
1461  field_datatype_map[
1462  (arg.proxy_data_type, arg.module_name)] = [arg]
1463 
1464  # Add the Invoke subroutine declarations for the different
1465  # field-type proxies
1466  for (fld_type, fld_mod), args in field_datatype_map.items():
1467  arg_list = [arg.proxy_declaration_name for arg in args]
1468  parent.add(TypeDeclGen(parent, datatype=fld_type,
1469  entity_decls=arg_list))
1470  (self._invoke_invoke.invokes.psy.
1471  infrastructure_modules[fld_mod].add(fld_type))
1472 
1473  # Create declarations for the pointers to the internal
1474  # data arrays.
1475  for arg in args:
1476  (self._invoke_invoke.invokes.psy.infrastructure_modules[const_mod].
1477  add(arg.precision))
1478  suffix = const.ARG_TYPE_SUFFIX_MAPPING[arg.argument_type]
1479  if arg.vector_size > 1:
1480  entity_names = []
1481  for idx in range(1, arg.vector_size+1):
1482  ttext = f"{arg.name}_{idx}:{suffix}"
1483  vsym = table.lookup_with_tag(ttext)
1484  entity_names.append(vsym.name)
1485  else:
1486  ttext = f"{arg.name}:{suffix}"
1487  sym = table.lookup_with_tag(ttext)
1488  entity_names = [sym.name]
1489  if entity_names:
1490  parent.add(
1491  DeclGen(
1492  parent, datatype=arg.intrinsic_type,
1493  kind=arg.precision, dimension=":",
1494  entity_decls=[f"{name} => null()" for
1495  name in entity_names],
1496  pointer=True))
1497 
1498  # Declarations of LMA operator proxies
1499  op_args = self._invoke_invoke.unique_declarations(
1500  argument_types=["gh_operator"])
1501  # Filter operators by their proxy datatype
1502  operators_datatype_map = OrderedDict()
1503  for op_arg in op_args:
1504  try:
1505  operators_datatype_map[op_arg.proxy_data_type].append(op_arg)
1506  except KeyError:
1507  # This proxy datatype has not been seen before so
1508  # create new entry
1509  operators_datatype_map[op_arg.proxy_data_type] = [op_arg]
1510  # Declare the operator proxies
1511  for operator_datatype, operators_list in \
1512  operators_datatype_map.items():
1513  operators_names = [arg.proxy_declaration_name for
1514  arg in operators_list]
1515  parent.add(TypeDeclGen(parent, datatype=operator_datatype,
1516  entity_decls=operators_names))
1517  for arg in operators_list:
1518  name = arg.name
1519  suffix = const.ARG_TYPE_SUFFIX_MAPPING[arg.argument_type]
1520  ttext = f"{name}:{suffix}"
1521  sym = table.lookup_with_tag(ttext)
1522  # Declare the pointer to the stencil array.
1523  parent.add(DeclGen(parent, datatype="real",
1524  kind=arg.precision,
1525  dimension=":,:,:",
1526  entity_decls=[f"{sym.name} => null()"],
1527  pointer=True))
1528  op_mod = operators_list[0].module_name
1529  # Ensure the appropriate derived datatype will be imported.
1530  (self._invoke_invoke.invokes.psy.infrastructure_modules[op_mod].
1531  add(operator_datatype))
1532  # Ensure the appropriate kind parameter will be imported.
1533  (self._invoke_invoke.invokes.psy.infrastructure_modules[const_mod].
1534  add(arg.precision))
1535 
1536  # Declarations of CMA operator proxies
1537  cma_op_args = self._invoke_invoke.unique_declarations(
1538  argument_types=["gh_columnwise_operator"])
1539  cma_op_proxy_decs = [arg.proxy_declaration_name for
1540  arg in cma_op_args]
1541  if cma_op_proxy_decs:
1542  op_type = cma_op_args[0].proxy_data_type
1543  op_mod = cma_op_args[0].module_name
1544  parent.add(TypeDeclGen(parent,
1545  datatype=op_type,
1546  entity_decls=cma_op_proxy_decs))
1547  (self._invoke_invoke.invokes.psy.infrastructure_modules[op_mod].
1548  add(op_type))
1549 
1550  def initialise(self, parent):
1551  '''
1552  Insert code into the PSy layer to initialise all necessary proxies.
1553 
1554  :param parent: node in the f2pygen AST representing the PSy-layer
1555  routine.
1556  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1557 
1558  :raises InternalError: if a kernel argument of an unrecognised type
1559  is encountered.
1560 
1561  '''
1562  parent.add(CommentGen(parent, ""))
1563  parent.add(CommentGen(parent,
1564  " Initialise field and/or operator proxies"))
1565  parent.add(CommentGen(parent, ""))
1566  for arg in self._invoke_invoke.psy_unique_vars:
1567  # We don't have proxies for scalars
1568  if arg.is_scalar:
1569  continue
1570 
1571  const = LFRicConstants()
1572  suffix = const.ARG_TYPE_SUFFIX_MAPPING[arg.argument_type]
1573 
1574  if arg.vector_size > 1:
1575  # the range function below returns values from
1576  # 1 to the vector size which is what we
1577  # require in our Fortran code
1578  for idx in range(1, arg.vector_size+1):
1579  parent.add(
1580  AssignGen(parent,
1581  lhs=arg.proxy_name+"("+str(idx)+")",
1582  rhs=arg.name+"("+str(idx)+")%get_proxy()"))
1583  name = self._symbol_table_symbol_table.lookup_with_tag(
1584  f"{arg.name}_{idx}:{suffix}").name
1585  parent.add(
1586  AssignGen(parent,
1587  lhs=name,
1588  rhs=f"{arg.proxy_name}({idx})%data",
1589  pointer=True))
1590  else:
1591  parent.add(AssignGen(parent, lhs=arg.proxy_name,
1592  rhs=arg.name+"%get_proxy()"))
1593  if arg.is_field:
1594  name = self._symbol_table_symbol_table.lookup_with_tag(
1595  f"{arg.name}:{suffix}").name
1596  parent.add(
1597  AssignGen(parent,
1598  lhs=name,
1599  rhs=f"{arg.proxy_name}%data",
1600  pointer=True))
1601  elif arg.is_operator:
1602  if arg.argument_type == "gh_columnwise_operator":
1603  # CMA operator arguments are handled in DynCMAOperators
1604  pass
1605  elif arg.argument_type == "gh_operator":
1606  name = self._symbol_table_symbol_table.lookup_with_tag(
1607  f"{arg.name}:{suffix}").name
1608  parent.add(
1609  AssignGen(parent,
1610  lhs=name,
1611  rhs=f"{arg.proxy_name}%local_stencil",
1612  pointer=True))
1613  else:
1614  raise InternalError(
1615  f"Kernel argument '{arg.name}' is a recognised "
1616  f"operator but its type ('{arg.argument_type}') is"
1617  f" not supported by DynProxies.initialise()")
1618  else:
1619  raise InternalError(
1620  f"Kernel argument '{arg.name}' of type "
1621  f"'{arg.argument_type}' not "
1622  f"handled in DynProxies.initialise()")
1623 
1624 
1626  '''
1627  Handles all entities required by kernels that operate on cell-columns.
1628 
1629  :param kern_or_invoke: the Kernel or Invoke for which to manage cell \
1630  iterators.
1631  :type kern_or_invoke: :py:class:`psyclone.domain.lfric.LFRicKern` or \
1632  :py:class:`psyclone.dynamo0p3.LFRicInvoke`
1633 
    :raises GenerationError: if an Invoke has no field or operator arguments.
1635 
1636  '''
1637  def __init__(self, kern_or_invoke):
1638  super().__init__(kern_or_invoke)
1639 
1640  self._nlayers_name_nlayers_name = self._symbol_table_symbol_table.find_or_create_tag(
1641  "nlayers", symbol_type=LFRicTypes("MeshHeightDataSymbol")).name
1642 
1643  # Store a reference to the first field/operator object that
1644  # we can use to look-up nlayers in the PSy layer.
1645  if not self._invoke_invoke:
1646  # We're not generating a PSy layer so we're done here.
1647  return
1648  first_var = None
1649  for var in self._invoke_invoke.psy_unique_vars:
1650  if not var.is_scalar:
1651  first_var = var
1652  break
1653  if not first_var:
1654  raise GenerationError(
1655  "Cannot create an Invoke with no field/operator arguments.")
1656  self._first_var_first_var = first_var
1657 
1658  def _invoke_declarations(self, parent):
1659  '''
1660  Declare entities required for iterating over cells in the Invoke.
1661 
1662  :param parent: the f2pygen node representing the PSy-layer routine.
1663  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1664 
1665  '''
1666  api_config = Config.get().api_conf("dynamo0.3")
1667 
1668  # We only need the number of layers in the mesh if we are calling
1669  # one or more kernels that operate on cell-columns.
1670  if not self._dofs_only_dofs_only:
1671  parent.add(DeclGen(parent, datatype="integer",
1672  kind=api_config.default_kind["integer"],
1673  entity_decls=[self._nlayers_name_nlayers_name]))
1674 
1675  def _stub_declarations(self, parent):
1676  '''
1677  Declare entities required for a kernel stub that operates on
1678  cell-columns.
1679 
1680  :param parent: the f2pygen node representing the Kernel stub.
1681  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1682 
1683  '''
1684  api_config = Config.get().api_conf("dynamo0.3")
1685 
1686  if self._kernel_kernel.cma_operation not in ["apply", "matrix-matrix"]:
1687  parent.add(DeclGen(parent, datatype="integer",
1688  kind=api_config.default_kind["integer"],
1689  intent="in", entity_decls=[self._nlayers_name_nlayers_name]))
1690 
1691  def initialise(self, parent):
1692  '''
1693  Look-up the number of vertical layers in the mesh in the PSy layer.
1694 
1695  :param parent: the f2pygen node representing the PSy-layer routine.
1696  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1697 
1698  '''
1699  if not self._dofs_only_dofs_only:
1700  parent.add(CommentGen(parent, ""))
1701  parent.add(CommentGen(parent, " Initialise number of layers"))
1702  parent.add(CommentGen(parent, ""))
1703  parent.add(AssignGen(
1704  parent, lhs=self._nlayers_name_nlayers_name,
1705  rhs=self._first_var_first_var.proxy_name_indexed + "%" +
1706  self._first_var_first_var.ref_name() + "%get_nlayers()"))
1707 
1708 
1710  '''
1711  Handles all entities associated with Local-Matrix-Assembly Operators.
1712  '''
1713  def _stub_declarations(self, parent):
1714  '''
1715  Declare all LMA-related quantities in a Kernel stub.
1716 
1717  :param parent: the f2pygen node representing the Kernel stub.
1718  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1719 
1720  '''
1721  api_config = Config.get().api_conf("dynamo0.3")
1722 
1723  lma_args = psyGen.args_filter(
1724  self._kernel_kernel.arguments.args, arg_types=["gh_operator"])
1725  if lma_args:
1726  parent.add(DeclGen(parent, datatype="integer",
1727  kind=api_config.default_kind["integer"],
1728  intent="in", entity_decls=["cell"]))
1729  for arg in lma_args:
1730  size = arg.name+"_ncell_3d"
1731  op_dtype = arg.intrinsic_type
1732  op_kind = arg.precision
1733  parent.add(DeclGen(parent, datatype="integer",
1734  kind=api_config.default_kind["integer"],
1735  intent="in", entity_decls=[size]))
1736  ndf_name_to = arg.function_space_to.ndf_name
1737  ndf_name_from = arg.function_space_from.ndf_name
1738  parent.add(DeclGen(parent, datatype=op_dtype, kind=op_kind,
1739  dimension=",".join([ndf_name_to,
1740  ndf_name_from, size]),
1741  intent=arg.intent,
1742  entity_decls=[arg.name]))
1743 
1744  def _invoke_declarations(self, parent):
1745  '''
1746  Declare all LMA-related quantities in a PSy-layer routine.
1747  Note: PSy layer in LFRic does not modify the LMA operator objects.
1748  Hence, their Fortran intents are always "in" (the data updated in the
1749  kernels is only pointed to from the LMA operator object and is thus
1750  not a part of the object).
1751 
1752  :param parent: the f2pygen node representing the PSy-layer routine.
1753  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1754 
1755  '''
1756  # Add the Invoke subroutine argument declarations for operators
1757  op_args = self._invoke_invoke.unique_declarations(
1758  argument_types=["gh_operator"])
1759  # Filter operators by their datatype
1760  operators_datatype_map = OrderedDict()
1761  for op_arg in op_args:
1762  try:
1763  operators_datatype_map[op_arg.data_type].append(op_arg)
1764  except KeyError:
1765  # This datatype has not been seen before so create new entry
1766  operators_datatype_map[op_arg.data_type] = [op_arg]
1767  # Declare the operators
1768  for op_datatype, op_list in operators_datatype_map.items():
1769  operators_names = [arg.declaration_name for arg in op_list]
1770  parent.add(TypeDeclGen(
1771  parent, datatype=op_datatype,
1772  entity_decls=operators_names, intent="in"))
1773  op_mod = op_list[0].module_name
1774  # Record that we will need to import this operator
1775  # datatype from the appropriate infrastructure module
1776  (self._invoke_invoke.invokes.psy.infrastructure_modules[op_mod].
1777  add(op_datatype))
1778 
1779 
1781  '''
1782  Holds all information on the Column-Matrix-Assembly operators
1783  required by an Invoke or Kernel stub.
1784 
1785  :param node: either an Invoke schedule or a single Kernel object.
1786  :type node: :py:class:`psyclone.dynamo0p3.DynSchedule` or \
1787  :py:class:`psyclone.domain.lfric.LFRicKern`
1788 
1789  '''
1790  # The scalar parameters that must be passed along with a CMA operator
1791  # if its 'to' and 'from' spaces are the same
1792  cma_same_fs_params = ["nrow", "bandwidth", "alpha",
1793  "beta", "gamma_m", "gamma_p"]
1794  # The scalar parameters that must be passed along with a CMA operator
1795  # if its 'to' and 'from' spaces are different
1796  cma_diff_fs_params = ["nrow", "ncol", "bandwidth", "alpha",
1797  "beta", "gamma_m", "gamma_p"]
1798 
    def __init__(self, node):
        '''
        Gathers information on every unique CMA operator accessed by the
        supplied node and creates the associated symbols in the symbol
        table so that they are available without the need to do a 'gen'.

        :param node: either an Invoke schedule or a single Kernel object.
        :type node: :py:class:`psyclone.dynamo0p3.DynSchedule` or
            :py:class:`psyclone.domain.lfric.LFRicKern`

        '''
        super().__init__(node)

        # Look at every kernel call and generate a set of
        # the unique CMA operators involved. For each one we create a
        # dictionary entry. The key is the name of the CMA argument in the
        # PSy layer and the entry is itself another dictionary containing
        # two entries: the first 'arg' is the CMA argument object and the
        # second 'params' is the list of integer variables associated with
        # that CMA operator. The contents of this list depend on whether
        # or not the to/from function spaces of the CMA operator are the
        # same.
        self._cma_ops_cma_ops = OrderedDict()
        # You can't index into an OrderedDict so we keep a separate ref
        # to the first CMA argument we find.
        self._first_cma_arg_first_cma_arg = None
        for call in self._calls_calls:
            if call.cma_operation:
                # Get a list of all of the CMA arguments to this call
                cma_args = psyGen.args_filter(
                    call.arguments.args,
                    arg_types=["gh_columnwise_operator"])
                # Create a dictionary entry for each argument that we
                # have not already seen
                for arg in cma_args:
                    if arg.name not in self._cma_ops_cma_ops:
                        # Operators with differing to/from spaces need the
                        # extra 'ncol' parameter (see cma_diff_fs_params).
                        if arg.function_space_to.orig_name != \
                                arg.function_space_from.orig_name:
                            self._cma_ops_cma_ops[arg.name] = {
                                "arg": arg,
                                "params": self.cma_diff_fs_paramscma_diff_fs_params}
                        else:
                            self._cma_ops_cma_ops[arg.name] = {
                                "arg": arg,
                                "params": self.cma_same_fs_paramscma_same_fs_params}
                        # Record the intent, intrinsic type and precision of
                        # this argument for use when declaring it later.
                        self._cma_ops_cma_ops[arg.name]["intent"] = arg.intent
                        self._cma_ops_cma_ops[arg.name]["datatype"] = \
                            arg.intrinsic_type
                        self._cma_ops_cma_ops[arg.name]["kind"] = arg.precision
                        # Keep a reference to the first CMA argument
                        if not self._first_cma_arg_first_cma_arg:
                            self._first_cma_arg_first_cma_arg = arg

        # Create all the necessary Symbols here so that they are available
        # without the need to do a 'gen'.
        symtab = self._symbol_table_symbol_table
        const = LFRicConstants()
        suffix = const.ARG_TYPE_SUFFIX_MAPPING["gh_columnwise_operator"]
        for op_name in self._cma_ops_cma_ops:
            # The symbol for the matrix itself: a deferred-shape, 3D real
            # pointer array declared via an UnsupportedFortranType (the
            # pointer attribute cannot be expressed directly in PSyIR).
            new_name = self._symbol_table_symbol_table.next_available_name(
                f"{op_name}_{suffix}")
            tag = f"{op_name}:{suffix}"
            arg = self._cma_ops_cma_ops[op_name]["arg"]
            precision = LFRicConstants().precision_for_type(arg.data_type)
            array_type = ArrayType(
                LFRicTypes("LFRicRealScalarDataType")(precision),
                [ArrayType.Extent.DEFERRED]*3)
            index_str = ",".join(3*[":"])
            dtype = UnsupportedFortranType(
                f"real(kind={arg.precision}), pointer, "
                f"dimension({index_str}) :: {new_name} => null()",
                partial_datatype=array_type)
            symtab.new_symbol(new_name,
                              symbol_type=DataSymbol,
                              datatype=dtype,
                              tag=tag)
            # Now the various integer parameters of the operator.
            for param in self._cma_ops_cma_ops[op_name]["params"]:
                symtab.find_or_create_integer_symbol(
                    f"{op_name}_{param}", tag=f"{op_name}:{param}:{suffix}")
1869 
1870  def initialise(self, parent):
1871  '''
1872  Generates the calls to the LFRic infrastructure that look-up
1873  the various components of each CMA operator. Adds these as
1874  children of the supplied parent node.
1875 
1876  :param parent: f2pygen node representing the PSy-layer routine.
1877  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1878 
1879  '''
1880  # If we have no CMA operators then we do nothing
1881  if not self._cma_ops_cma_ops:
1882  return
1883 
1884  parent.add(CommentGen(parent, ""))
1885  parent.add(CommentGen(parent,
1886  " Look-up information for each CMA operator"))
1887  parent.add(CommentGen(parent, ""))
1888 
1889  const = LFRicConstants()
1890  suffix = const.ARG_TYPE_SUFFIX_MAPPING["gh_columnwise_operator"]
1891 
1892  for op_name in self._cma_ops_cma_ops:
1893  # First, assign a pointer to the array containing the actual
1894  # matrix.
1895  cma_name = self._symbol_table_symbol_table.lookup_with_tag(
1896  f"{op_name}:{suffix}").name
1897  parent.add(AssignGen(parent, lhs=cma_name, pointer=True,
1898  rhs=self._cma_ops_cma_ops[op_name]["arg"].
1899  proxy_name_indexed+"%columnwise_matrix"))
1900  # Then make copies of the related integer parameters
1901  for param in self._cma_ops_cma_ops[op_name]["params"]:
1902  param_name = self._symbol_table_symbol_table.find_or_create_tag(
1903  f"{op_name}:{param}:{suffix}").name
1904  parent.add(AssignGen(parent, lhs=param_name,
1905  rhs=self._cma_ops_cma_ops[op_name]["arg"].
1906  proxy_name_indexed+"%"+param))
1907 
1908  def _invoke_declarations(self, parent):
1909  '''
1910  Generate the necessary PSy-layer declarations for all column-wise
1911  operators and their associated parameters.
1912  Note: PSy layer in LFRic does not modify the CMA operator objects.
1913  Hence, their Fortran intents are always "in" (the data updated in the
1914  kernels is only pointed to from the column-wise operator object and is
1915  thus not a part of the object).
1916 
1917  :param parent: the f2pygen node representing the PSy-layer routine.
1918  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1919 
1920  '''
1921  api_config = Config.get().api_conf("dynamo0.3")
1922 
1923  # If we have no CMA operators then we do nothing
1924  if not self._cma_ops_cma_ops:
1925  return
1926 
1927  # Add the Invoke subroutine argument declarations for column-wise
1928  # operators
1929  cma_op_args = self._invoke_invoke.unique_declarations(
1930  argument_types=["gh_columnwise_operator"])
1931  # Create a list of column-wise operator names
1932  cma_op_arg_list = [arg.declaration_name for arg in cma_op_args]
1933  if cma_op_arg_list:
1934  op_type = cma_op_args[0].data_type
1935  op_mod = cma_op_args[0].module_name
1936  parent.add(TypeDeclGen(parent,
1937  datatype=op_type,
1938  entity_decls=cma_op_arg_list,
1939  intent="in"))
1940  (self._invoke_invoke.invokes.psy.infrastructure_modules[op_mod].
1941  add(op_type))
1942 
1943  const = LFRicConstants()
1944  suffix = const.ARG_TYPE_SUFFIX_MAPPING["gh_columnwise_operator"]
1945  for op_name in self._cma_ops_cma_ops:
1946  # Declare the operator matrix itself.
1947  tag_name = f"{op_name}:{suffix}"
1948  cma_name = self._symbol_table_symbol_table.lookup_with_tag(tag_name).name
1949  cma_dtype = self._cma_ops_cma_ops[op_name]["datatype"]
1950  cma_kind = self._cma_ops_cma_ops[op_name]["kind"]
1951  parent.add(DeclGen(parent, datatype=cma_dtype,
1952  kind=cma_kind, pointer=True,
1953  dimension=":,:,:",
1954  entity_decls=[f"{cma_name} => null()"]))
1955  const = LFRicConstants()
1956  const_mod = const.UTILITIES_MOD_MAP["constants"]["module"]
1957  const_mod_uses = self._invoke_invoke.invokes.psy. \
1958  infrastructure_modules[const_mod]
1959  # Record that we will need to import the kind of this
1960  # cma operator from the appropriate infrastructure
1961  # module
1962  const_mod_uses.add(cma_kind)
1963 
1964  # Declare the associated integer parameters
1965  param_names = []
1966  for param in self._cma_ops_cma_ops[op_name]["params"]:
1967  name = f"{op_name}_{param}"
1968  tag = f"{op_name}:{param}:{suffix}"
1969  sym = self._symbol_table_symbol_table.find_or_create_integer_symbol(
1970  name, tag=tag)
1971  param_names.append(sym.name)
1972  parent.add(DeclGen(parent, datatype="integer",
1973  kind=api_config.default_kind["integer"],
1974  entity_decls=param_names))
1975 
1976  def _stub_declarations(self, parent):
1977  '''
1978  Generate all necessary declarations for CMA operators being passed to
1979  a Kernel stub.
1980 
1981  :param parent: f2pygen node representing the Kernel stub.
1982  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
1983 
1984  '''
1985  api_config = Config.get().api_conf("dynamo0.3")
1986 
1987  # If we have no CMA operators then we do nothing
1988  if not self._cma_ops_cma_ops:
1989  return
1990 
1991  symtab = self._symbol_table_symbol_table
1992 
1993  # CMA operators always need the current cell index and the number
1994  # of columns in the mesh
1995  parent.add(DeclGen(parent, datatype="integer",
1996  kind=api_config.default_kind["integer"],
1997  intent="in", entity_decls=["cell", "ncell_2d"]))
1998 
1999  const = LFRicConstants()
2000  suffix = const.ARG_TYPE_SUFFIX_MAPPING["gh_columnwise_operator"]
2001 
2002  for op_name in self._cma_ops_cma_ops:
2003  # Declare the associated scalar arguments before the array because
2004  # some of them are used to dimension the latter (and some compilers
2005  # get upset if this ordering is not followed)
2006  _local_args = []
2007  for param in self._cma_ops_cma_ops[op_name]["params"]:
2008  param_name = symtab.find_or_create_tag(
2009  f"{op_name}:{param}:{suffix}",
2010  root_name=f"{op_name}_{param}").name
2011  _local_args.append(param_name)
2012  parent.add(DeclGen(parent, datatype="integer",
2013  kind=api_config.default_kind["integer"],
2014  intent="in", entity_decls=_local_args))
2015  # Declare the array that holds the CMA operator
2016  bandwidth = symtab.find_or_create_tag(
2017  f"{op_name}:bandwidth:{suffix}",
2018  root_name=f"{op_name}_bandwidth").name
2019  nrow = symtab.find_or_create_tag(
2020  f"{op_name}:nrow:{suffix}",
2021  root_name=f"{op_name}_nrow").name
2022  intent = self._cma_ops_cma_ops[op_name]["intent"]
2023  op_dtype = self._cma_ops_cma_ops[op_name]["datatype"]
2024  op_kind = self._cma_ops_cma_ops[op_name]["kind"]
2025  parent.add(DeclGen(parent, datatype=op_dtype, kind=op_kind,
2026  dimension=",".join([bandwidth,
2027  nrow, "ncell_2d"]),
2028  intent=intent, entity_decls=[op_name]))
2029 
2030 
2031 class DynMeshes():
2032  '''
2033  Holds all mesh-related information (including colour maps if
2034  required). If there are no inter-grid kernels then there is only
2035  one mesh object required (when calling kernels with operates_on==domain,
2036  colouring, doing distributed memory or querying the reference element).
2037  However, kernels performing inter-grid operations require multiple mesh
2038  objects as well as mesh maps and other quantities.
2039 
2040  There are two types of inter-grid operation; the first is "prolongation"
2041  where a field on a coarse mesh is mapped onto a fine mesh. The second
2042  is "restriction" where a field on a fine mesh is mapped onto a coarse
2043  mesh.
2044 
2045  :param invoke: the Invoke for which to extract information on all \
2046  required inter-grid operations.
2047  :type invoke: :py:class:`psyclone.dynamo0p3.LFRicInvoke`
2048  :param unique_psy_vars: list of arguments to the PSy-layer routine.
2049  :type unique_psy_vars: list of \
2050  :py:class:`psyclone.dynamo0p3.DynKernelArgument` objects.
2051  '''
2052 
2053  def __init__(self, invoke, unique_psy_vars):
2054  # List of names of unique mesh variables referenced in the Invoke
2055  self._mesh_tag_names_mesh_tag_names = []
2056  # Whether or not the associated Invoke requires colourmap information
2057  self._needs_colourmap_needs_colourmap = False
2058  self._needs_colourmap_halo_needs_colourmap_halo = False
2059  # Keep a reference to the InvokeSchedule so we can check for colouring
2060  # later
2061  self._schedule_schedule = invoke.schedule
2062  self._symbol_table_symbol_table = self._schedule_schedule.symbol_table
2063  # Set used to generate a list of the unique mesh objects
2064  _name_set = set()
2065 
2066  # Find the first non-scalar argument to this PSy layer routine. We
2067  # will use this to look-up the mesh if there are no inter-grid
2068  # kernels in this invoke.
2069  self._first_var_first_var = None
2070  for var in unique_psy_vars:
2071  if not var.is_scalar:
2072  self._first_var_first_var = var
2073  break
2074 
2075  # Loop over all kernel calls in the schedule. Keep a list of
2076  # any non-intergrid kernels so that we can generate a verbose error
2077  # message if necessary.
2078  non_intergrid_kernels = []
2079  has_intergrid = False
2080  for call in self._schedule_schedule.coded_kernels():
2081 
2082  if (call.reference_element.properties or call.mesh.properties or
2083  call.iterates_over == "domain" or call.cma_operation):
2084  _name_set.add("mesh")
2085 
2086  if not call.is_intergrid:
2087  non_intergrid_kernels.append(call)
2088  else:
2089  has_intergrid = True
2090  # Create and store the names of the associated mesh objects
2091  _name_set.add(f"mesh_{call._intergrid_ref.fine.name}")
2092  _name_set.add(f"mesh_{call._intergrid_ref.coarse.name}")
2093 
2094  # If we found a mixture of both inter-grid and non-inter-grid kernels
2095  # then we reject the invoke()
2096  if non_intergrid_kernels and has_intergrid:
2097  raise GenerationError(
2098  f"An invoke containing inter-grid kernels must contain no "
2099  f"other kernel types but kernels "
2100  f"'{''', '''.join([c.name for c in non_intergrid_kernels])}' "
2101  f"in invoke '{invoke.name}' are not inter-grid kernels.")
2102 
2103  # If distributed memory is enabled then we will need at least
2104  # one mesh object if we have one or more kernels that operate
2105  # on cell-columns or are doing redundant computation for a
2106  # kernel that operates on dofs. Since the latter condition
2107  # comes about through the application of a transformation, we
2108  # don't yet know whether or not a mesh is required. Therefore,
2109  # the only solution is to assume that a mesh object is
2110  # required if distributed memory is enabled. We also require a
2111  # mesh object if any of the kernels require properties of
2112  # either the reference element or the mesh. (Colourmaps also
2113  # require a mesh object but that is handled in _colourmap_init().)
2114  if not _name_set and Config.get().distributed_memory:
2115  # We didn't already have a requirement for a mesh so add one now.
2116  _name_set.add("mesh")
2117 
2118  self._add_mesh_symbols_add_mesh_symbols(list(_name_set))
2119 
2120  def _add_mesh_symbols(self, mesh_tags):
2121  '''
2122  Add DataSymbols for the supplied list of mesh names and store the
2123  corresponding list of tags.
2124 
2125  A ContainerSymbol is created for the LFRic mesh module and a TypeSymbol
2126  for the mesh type. If distributed memory is enabled then a DataSymbol
2127  to hold the maximum halo depth is created for each mesh.
2128 
2129  :param mesh_tags: tag names for every mesh object required.
2130  :type mesh_tags: list of str
2131 
2132  '''
2133  if not mesh_tags:
2134  return
2135 
2136  self._mesh_tag_names_mesh_tag_names = sorted(mesh_tags)
2137 
2138  # Look up the names of the module and type for the mesh object
2139  # from the LFRic constants class.
2140  const = LFRicConstants()
2141  mmod = const.MESH_TYPE_MAP["mesh"]["module"]
2142  mtype = const.MESH_TYPE_MAP["mesh"]["type"]
2143  # Create a Container symbol for the module
2144  csym = self._symbol_table_symbol_table.find_or_create_tag(
2145  mmod, symbol_type=ContainerSymbol)
2146  # Create a TypeSymbol for the mesh type
2147  mtype_sym = self._symbol_table_symbol_table.find_or_create_tag(
2148  mtype, symbol_type=DataTypeSymbol,
2149  datatype=UnresolvedType(),
2150  interface=ImportInterface(csym))
2151 
2152  name_list = []
2153  for name in mesh_tags:
2154  name_list.append(self._symbol_table_symbol_table.find_or_create_tag(
2155  name, symbol_type=DataSymbol, datatype=mtype_sym).name)
2156 
2157  if Config.get().distributed_memory:
2158  # If distributed memory is enabled then we require a variable
2159  # holding the maximum halo depth for each mesh.
2160  for name in mesh_tags:
2161  var_name = f"max_halo_depth_{name}"
2162  self._symbol_table_symbol_table.find_or_create_integer_symbol(
2163  var_name, tag=var_name)
2164 
    def _colourmap_init(self):
        '''
        Sets-up information on any required colourmaps. This cannot be done
        in the constructor since colouring is applied by Transformations
        and happens after the Schedule has already been constructed. Therefore,
        this method is called at code-generation time.

        '''
        # pylint: disable=too-many-locals
        const = LFRicConstants()
        non_intergrid_kern = None
        sym_tab = self._schedule_schedule.symbol_table

        for call in [call for call in self._schedule_schedule.coded_kernels() if
                     call.is_coloured()]:
            # Keep a record of whether or not any kernels (loops) in this
            # invoke have been coloured and, if so, whether the associated loop
            # goes into the halo.
            if (call.parent.parent.upper_bound_name in
                    const.HALO_ACCESS_LOOP_BOUNDS):
                self._needs_colourmap_halo_needs_colourmap_halo = True
            else:
                self._needs_colourmap_needs_colourmap = True

            if not call.is_intergrid:
                # Remember one (the last-seen) coloured, non-intergrid
                # kernel for use after the loop.
                non_intergrid_kern = call
                continue

            # This is an inter-grid kernel so look-up the names of
            # the colourmap variables associated with the coarse
            # mesh (since that determines the iteration space).
            carg_name = call._intergrid_ref.coarse.name
            # Colour map
            base_name = "cmap_" + carg_name
            colour_map = sym_tab.find_or_create_array(
                base_name, 2, ScalarType.Intrinsic.INTEGER,
                tag=base_name)
            # No. of colours
            base_name = "ncolour_" + carg_name
            ncolours = sym_tab.find_or_create_integer_symbol(
                base_name, tag=base_name)
            # Array holding the last cell of a given colour.
            if (Config.get().distributed_memory and
                    not call.all_updates_are_writes):
                # This will require a loop into the halo and so the array is
                # 2D (indexed by colour *and* halo depth).
                base_name = "last_halo_cell_all_colours_" + carg_name
                last_cell = self._schedule_schedule.symbol_table.find_or_create_array(
                    base_name, 2, ScalarType.Intrinsic.INTEGER, tag=base_name)
            else:
                # Array holding the last edge cell of a given colour. Just 1D
                # as indexed by colour only.
                base_name = "last_edge_cell_all_colours_" + carg_name
                last_cell = self._schedule_schedule.symbol_table.find_or_create_array(
                    base_name, 1, ScalarType.Intrinsic.INTEGER, tag=base_name)
            # Add these symbols into the DynInterGrid entry for this kernel
            call._intergrid_ref.set_colour_info(colour_map, ncolours,
                                                last_cell)

        if non_intergrid_kern and (self._needs_colourmap_needs_colourmap or
                                   self._needs_colourmap_halo_needs_colourmap_halo):
            # There aren't any inter-grid kernels but we do need colourmap
            # information and that means we'll need a mesh object
            self._add_mesh_symbols_add_mesh_symbols(["mesh"])
            # This creates the colourmap information for this invoke if we
            # don't already have one.
            # NOTE(review): the returned value is unused - presumably the
            # property access creates the colourmap symbol as a side
            # effect; confirm against the kernel class.
            colour_map = non_intergrid_kern.colourmap
            # No. of colours (symbol created for its side effect; the name
            # bound here is not used further in this method).
            ncolours = sym_tab.find_or_create_integer_symbol(
                "ncolour", tag="ncolour").name
            if self._needs_colourmap_halo_needs_colourmap_halo:
                # 2D cell-count array: indexed by colour and halo depth.
                sym_tab.find_or_create_array(
                    "last_halo_cell_all_colours", 2,
                    ScalarType.Intrinsic.INTEGER,
                    tag="last_halo_cell_all_colours")
            if self._needs_colourmap_needs_colourmap:
                # 1D cell-count array: indexed by colour only.
                sym_tab.find_or_create_array(
                    "last_edge_cell_all_colours", 1,
                    ScalarType.Intrinsic.INTEGER,
                    tag="last_edge_cell_all_colours")
2245 
2246  def declarations(self, parent):
2247  '''
2248  Declare variables specific to mesh objects.
2249 
2250  :param parent: the parent node to which to add the declarations
2251  :type parent: :py:class:`psyclone.f2pygen.BaseGen`
2252 
2253  '''
2254  # pylint: disable=too-many-locals, too-many-statements
2255  api_config = Config.get().api_conf("dynamo0.3")
2256  const = LFRicConstants()
2257 
2258  # Since we're now generating code, any transformations must
2259  # have been applied so we can set-up colourmap information
2260  self._colourmap_init_colourmap_init()
2261 
2262  # We'll need various typedefs from the mesh module
2263  mtype = const.MESH_TYPE_MAP["mesh"]["type"]
2264  mmod = const.MESH_TYPE_MAP["mesh"]["module"]
2265  mmap_type = const.MESH_TYPE_MAP["mesh_map"]["type"]
2266  mmap_mod = const.MESH_TYPE_MAP["mesh_map"]["module"]
2267  if self._mesh_tag_names_mesh_tag_names:
2268  name = self._symbol_table_symbol_table.lookup_with_tag(mtype).name
2269  parent.add(UseGen(parent, name=mmod, only=True,
2270  funcnames=[name]))
2271  if self.intergrid_kernelsintergrid_kernels:
2272  parent.add(UseGen(parent, name=mmap_mod, only=True,
2273  funcnames=[mmap_type]))
2274  # Declare the mesh object(s) and associated halo depths
2275  for tag_name in self._mesh_tag_names_mesh_tag_names:
2276  name = self._symbol_table_symbol_table.lookup_with_tag(tag_name).name
2277  parent.add(TypeDeclGen(parent, pointer=True, datatype=mtype,
2278  entity_decls=[name + " => null()"]))
2279  # For each mesh we also need the maximum halo depth.
2280  if Config.get().distributed_memory:
2281  name = self._symbol_table_symbol_table.lookup_with_tag(
2282  f"max_halo_depth_{tag_name}").name
2283  parent.add(DeclGen(parent, datatype="integer",
2284  kind=api_config.default_kind["integer"],
2285  entity_decls=[name]))
2286 
2287  # Declare the inter-mesh map(s) and cell map(s)
2288  for kern in self.intergrid_kernelsintergrid_kernels:
2289  parent.add(TypeDeclGen(parent, pointer=True,
2290  datatype=mmap_type,
2291  entity_decls=[kern.mmap + " => null()"]))
2292  parent.add(
2293  DeclGen(parent, pointer=True, datatype="integer",
2294  kind=api_config.default_kind["integer"],
2295  entity_decls=[kern.cell_map + "(:,:,:) => null()"]))
2296 
2297  # Declare the number of cells in the fine mesh and how many fine
2298  # cells there are per coarse cell
2299  parent.add(DeclGen(parent, datatype="integer",
2300  kind=api_config.default_kind["integer"],
2301  entity_decls=[kern.ncell_fine,
2302  kern.ncellpercellx,
2303  kern.ncellpercelly]))
2304  # Declare variables to hold the colourmap information if required
2305  if kern.colourmap_symbol:
2306  parent.add(
2307  DeclGen(parent, datatype="integer",
2308  kind=api_config.default_kind["integer"],
2309  pointer=True,
2310  entity_decls=[kern.colourmap_symbol.name+"(:,:)"]))
2311  parent.add(
2312  DeclGen(parent, datatype="integer",
2313  kind=api_config.default_kind["integer"],
2314  entity_decls=[kern.ncolours_var_symbol.name]))
2315  # The cell-count array is 2D if we go into the halo and 1D
2316  # otherwise (i.e. no DM or this kernel is GH_WRITE only and
2317  # does not access the halo).
2318  dim_list = len(kern.last_cell_var_symbol.datatype.shape)*":"
2319  decln = (f"{kern.last_cell_var_symbol.name}("
2320  f"{','.join(dim_list)})")
2321  parent.add(
2322  DeclGen(parent, datatype="integer", allocatable=True,
2323  kind=api_config.default_kind["integer"],
2324  entity_decls=[decln]))
2325 
2326  if not self.intergrid_kernelsintergrid_kernels and (self._needs_colourmap_needs_colourmap or
2327  self._needs_colourmap_halo_needs_colourmap_halo):
2328  # There aren't any inter-grid kernels but we do need
2329  # colourmap information
2330  base_name = "cmap"
2331  csym = self._schedule_schedule.symbol_table.lookup_with_tag("cmap")
2332  colour_map = csym.name
2333  # No. of colours
2334  base_name = "ncolour"
2335  ncolours = \
2336  self._schedule_schedule.symbol_table.find_or_create_tag(base_name).name
2337  # Add declarations for these variables
2338  parent.add(DeclGen(parent, datatype="integer",
2339  kind=api_config.default_kind["integer"],
2340  pointer=True,
2341  entity_decls=[colour_map+"(:,:)"]))
2342  parent.add(DeclGen(parent, datatype="integer",
2343  kind=api_config.default_kind["integer"],
2344  entity_decls=[ncolours]))
2345  if self._needs_colourmap_halo_needs_colourmap_halo:
2346  last_cell = self._symbol_table_symbol_table.find_or_create_tag(
2347  "last_halo_cell_all_colours")
2348  parent.add(DeclGen(parent, datatype="integer",
2349  kind=api_config.default_kind["integer"],
2350  allocatable=True,
2351  entity_decls=[last_cell.name+"(:,:)"]))
2352  if self._needs_colourmap_needs_colourmap:
2353  last_cell = self._symbol_table_symbol_table.find_or_create_tag(
2354  "last_edge_cell_all_colours")
2355  parent.add(DeclGen(parent, datatype="integer",
2356  kind=api_config.default_kind["integer"],
2357  allocatable=True,
2358  entity_decls=[last_cell.name+"(:)"]))
2359 
2360  def initialise(self, parent):
2361  '''
2362  Initialise parameters specific to inter-grid kernels.
2363 
2364  :param parent: the parent node to which to add the initialisations.
2365  :type parent: :py:class:`psyclone.f2pygen.BaseGen`
2366 
2367  '''
2368  # pylint: disable=too-many-branches
2369  # If we haven't got any need for a mesh in this invoke then we
2370  # don't do anything
2371  if not self._mesh_tag_names_mesh_tag_names:
2372  return
2373 
2374  parent.add(CommentGen(parent, ""))
2375 
2376  if len(self._mesh_tag_names_mesh_tag_names) == 1:
2377  # We only require one mesh object which means that this invoke
2378  # contains no inter-grid kernels (which would require at least 2)
2379  parent.add(CommentGen(parent, " Create a mesh object"))
2380  parent.add(CommentGen(parent, ""))
2381  rhs = "%".join([self._first_var_first_var.proxy_name_indexed,
2382  self._first_var_first_var.ref_name(), "get_mesh()"])
2383  mesh_name = self._symbol_table_symbol_table.lookup_with_tag(
2384  self._mesh_tag_names_mesh_tag_names[0]).name
2385  parent.add(AssignGen(parent, pointer=True, lhs=mesh_name, rhs=rhs))
2386  if Config.get().distributed_memory:
2387  # If distributed memory is enabled then we need the maximum
2388  # halo depth.
2389  depth_name = self._symbol_table_symbol_table.lookup_with_tag(
2390  f"max_halo_depth_{self._mesh_tag_names[0]}").name
2391  parent.add(AssignGen(parent, lhs=depth_name,
2392  rhs=f"{mesh_name}%get_halo_depth()"))
2393  if self._needs_colourmap_needs_colourmap or self._needs_colourmap_halo_needs_colourmap_halo:
2394  parent.add(CommentGen(parent, ""))
2395  parent.add(CommentGen(parent, " Get the colourmap"))
2396  parent.add(CommentGen(parent, ""))
2397  # Look-up variable names for colourmap and number of colours
2398  colour_map = self._schedule_schedule.symbol_table.find_or_create_tag(
2399  "cmap").name
2400  ncolour = \
2401  self._schedule_schedule.symbol_table.find_or_create_tag("ncolour")\
2402  .name
2403  # Get the number of colours
2404  parent.add(AssignGen(
2405  parent, lhs=ncolour, rhs=f"{mesh_name}%get_ncolours()"))
2406  # Get the colour map
2407  parent.add(AssignGen(parent, pointer=True, lhs=colour_map,
2408  rhs=f"{mesh_name}%get_colour_map()"))
2409  return
2410 
2411  parent.add(CommentGen(
2412  parent,
2413  " Look-up mesh objects and loop limits for inter-grid kernels"))
2414  parent.add(CommentGen(parent, ""))
2415 
2416  # Keep a list of quantities that we've already initialised so
2417  # that we don't generate duplicate assignments
2418  initialised = []
2419 
2420  # Loop over the DynInterGrid objects
2421  for dig in self.intergrid_kernelsintergrid_kernels:
2422  # We need pointers to both the coarse and the fine mesh as well
2423  # as the maximum halo depth for each.
2424  fine_mesh = self._schedule_schedule.symbol_table.find_or_create_tag(
2425  f"mesh_{dig.fine.name}").name
2426  coarse_mesh = self._schedule_schedule.symbol_table.find_or_create_tag(
2427  f"mesh_{dig.coarse.name}").name
2428  if fine_mesh not in initialised:
2429  initialised.append(fine_mesh)
2430  parent.add(
2431  AssignGen(parent, pointer=True,
2432  lhs=fine_mesh,
2433  rhs="%".join([dig.fine.proxy_name_indexed,
2434  dig.fine.ref_name(),
2435  "get_mesh()"])))
2436  if Config.get().distributed_memory:
2437  max_halo_f_mesh = (
2438  self._schedule_schedule.symbol_table.find_or_create_tag(
2439  f"max_halo_depth_mesh_{dig.fine.name}").name)
2440 
2441  parent.add(AssignGen(parent, lhs=max_halo_f_mesh,
2442  rhs=f"{fine_mesh}%get_halo_depth()"))
2443  if coarse_mesh not in initialised:
2444  initialised.append(coarse_mesh)
2445  parent.add(
2446  AssignGen(parent, pointer=True,
2447  lhs=coarse_mesh,
2448  rhs="%".join([dig.coarse.proxy_name_indexed,
2449  dig.coarse.ref_name(),
2450  "get_mesh()"])))
2451  if Config.get().distributed_memory:
2452  max_halo_c_mesh = (
2453  self._schedule_schedule.symbol_table.find_or_create_tag(
2454  f"max_halo_depth_mesh_{dig.coarse.name}").name)
2455  parent.add(AssignGen(
2456  parent, lhs=max_halo_c_mesh,
2457  rhs=f"{coarse_mesh}%get_halo_depth()"))
2458  # We also need a pointer to the mesh map which we get from
2459  # the coarse mesh
2460  if dig.mmap not in initialised:
2461  initialised.append(dig.mmap)
2462  parent.add(
2463  AssignGen(parent, pointer=True,
2464  lhs=dig.mmap,
2465  rhs=f"{coarse_mesh}%get_mesh_map({fine_mesh})"))
2466 
2467  # Cell map. This is obtained from the mesh map.
2468  if dig.cell_map not in initialised:
2469  initialised.append(dig.cell_map)
2470  parent.add(
2471  AssignGen(parent, pointer=True, lhs=dig.cell_map,
2472  rhs=dig.mmap+"%get_whole_cell_map()"))
2473 
2474  # Number of cells in the fine mesh
2475  if dig.ncell_fine not in initialised:
2476  initialised.append(dig.ncell_fine)
2477  if Config.get().distributed_memory:
2478  # TODO this hardwired depth of 2 will need changing in
2479  # order to support redundant computation
2480  parent.add(
2481  AssignGen(parent, lhs=dig.ncell_fine,
2482  rhs=(fine_mesh+"%get_last_halo_cell"
2483  "(depth=2)")))
2484  else:
2485  parent.add(
2486  AssignGen(parent, lhs=dig.ncell_fine,
2487  rhs="%".join([dig.fine.proxy_name,
2488  dig.fine.ref_name(),
2489  "get_ncell()"])))
2490 
2491  # Number of fine cells per coarse cell in x.
2492  if dig.ncellpercellx not in initialised:
2493  initialised.append(dig.ncellpercellx)
2494  parent.add(
2495  AssignGen(parent, lhs=dig.ncellpercellx,
2496  rhs=dig.mmap +
2497  "%get_ntarget_cells_per_source_x()"))
2498 
2499  # Number of fine cells per coarse cell in y.
2500  if dig.ncellpercelly not in initialised:
2501  initialised.append(dig.ncellpercelly)
2502  parent.add(
2503  AssignGen(parent, lhs=dig.ncellpercelly,
2504  rhs=dig.mmap +
2505  "%get_ntarget_cells_per_source_y()"))
2506 
2507  # Colour map for the coarse mesh (if required)
2508  if dig.colourmap_symbol:
2509  # Number of colours
2510  parent.add(AssignGen(parent, lhs=dig.ncolours_var_symbol.name,
2511  rhs=coarse_mesh + "%get_ncolours()"))
2512  # Colour map itself
2513  parent.add(AssignGen(parent, lhs=dig.colourmap_symbol.name,
2514  pointer=True,
2515  rhs=coarse_mesh + "%get_colour_map()"))
2516  # Last halo/edge cell per colour.
2517  sym = dig.last_cell_var_symbol
2518  if len(sym.datatype.shape) == 2:
2519  # Array is 2D so is a halo access.
2520  name = "%get_last_halo_cell_all_colours()"
2521  else:
2522  # Array is just 1D so go to the last edge cell.
2523  name = "%get_last_edge_cell_all_colours()"
2524  parent.add(AssignGen(parent, lhs=sym.name,
2525  rhs=coarse_mesh + name))
2526 
2527  @property
2529  '''
2530  :returns: A list of objects describing the intergrid kernels used in
2531  this invoke.
2532  :rtype: list[:py:class:`psyclone.dynamo3p0.DynInterGrid`]
2533  '''
2534  intergrids = []
2535  for call in self._schedule_schedule.coded_kernels():
2536  if call.is_intergrid:
2537  intergrids.append(call._intergrid_ref)
2538  return intergrids
2539 
2540 
class DynInterGrid:
    '''
    Holds information on quantities required by an inter-grid kernel.

    :param fine_arg: Kernel argument on the fine mesh.
    :type fine_arg: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
    :param coarse_arg: Kernel argument on the coarse mesh.
    :type coarse_arg: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
    '''
    # pylint: disable=too-few-public-methods, too-many-instance-attributes
    def __init__(self, fine_arg, coarse_arg):

        # Arguments on the coarse and fine grids
        self.coarse = coarse_arg
        self.fine = fine_arg

        # Get a reference to the InvokeSchedule SymbolTable
        symtab = self.coarse.call.ancestor(InvokeSchedule).symbol_table

        # Generate name for inter-mesh map
        base_mmap_name = f"mmap_{fine_arg.name}_{coarse_arg.name}"
        self.mmap = symtab.find_or_create_tag(base_mmap_name).name

        # Generate name for ncell variables
        name = f"ncell_{fine_arg.name}"
        self.ncell_fine = symtab.find_or_create_integer_symbol(
            name, tag=name).name
        # No. of fine cells per coarse cell in x
        name = f"ncpc_{fine_arg.name}_{coarse_arg.name}_x"
        self.ncellpercellx = symtab.find_or_create_integer_symbol(
            name, tag=name).name
        # No. of fine cells per coarse cell in y
        name = f"ncpc_{fine_arg.name}_{coarse_arg.name}_y"
        self.ncellpercelly = symtab.find_or_create_integer_symbol(
            name, tag=name).name
        # Name for cell map
        base_name = "cell_map_" + coarse_arg.name
        sym = symtab.find_or_create_array(base_name, 3,
                                          ScalarType.Intrinsic.INTEGER,
                                          tag=base_name)
        self.cell_map = sym.name

        # We have no colourmap information when first created
        self._colourmap_symbol = None
        # Symbol for the variable holding the number of colours
        self._ncolours_var_symbol = None
        # Symbol of the variable holding the last cell of a particular colour.
        # Will be a 2D array if the kernel iteration space includes the halo
        # and 1D otherwise.
        self._last_cell_var_symbol = None

    def set_colour_info(self, colour_map, ncolours, last_cell):
        '''Sets the colour_map, number of colours, and
        last cell of a particular colour.

        :param colour_map: the colour map symbol.
        :type colour_map: :py:class:`psyclone.psyir.symbols.Symbol`
        :param ncolours: the number of colours.
        :type ncolours: :py:class:`psyclone.psyir.symbols.Symbol`
        :param last_cell: the last halo cell of a particular colour.
        :type last_cell: :py:class:`psyclone.psyir.symbols.Symbol`

        '''
        self._colourmap_symbol = colour_map
        self._ncolours_var_symbol = ncolours
        self._last_cell_var_symbol = last_cell

    @property
    def colourmap_symbol(self):
        ''':returns: the colour map symbol.
        :rtype: :py:class:`psyclone.psyir.symbols.Symbol`
        '''
        return self._colourmap_symbol

    @property
    def ncolours_var_symbol(self):
        ''':returns: the symbol for storing the number of colours.
        :rtype: :py:class:`psyclone.psyir.symbols.Symbol`
        '''
        return self._ncolours_var_symbol

    @property
    def last_cell_var_symbol(self):
        ''':returns: the last halo/edge cell variable.
        :rtype: :py:class:`psyclone.psyir.symbols.Symbol`
        '''
        return self._last_cell_var_symbol
2628 
2629 
class DynBasisFunctions(LFRicCollection):
    ''' Holds all information on the basis and differential basis
    functions required by an invoke or kernel call. This covers both those
    required for quadrature and for evaluators.

    :param node: either the schedule of an Invoke or a single Kernel object \
        for which to extract information on all required \
        basis/diff-basis functions.
    :type node: :py:class:`psyclone.dynamo0p3.DynInvokeSchedule` or \
        :py:class:`psyclone.domain.lfric.LFRicKern`

    :raises InternalError: if a call has an unrecognised evaluator shape.

    '''
    # Dimensioning vars for the basis function arrays required by each
    # type of quadrature
    qr_dim_vars = {"xyoz": ["np_xy", "np_z"],
                   "edge": ["np_xyz", "nedges"],
                   "face": ["np_xyz", "nfaces"]}
    # The different weights arrays required by each type of quadrature
    qr_weight_vars = {"xyoz": ["weights_xy", "weights_z"],
                      "edge": ["weights_xyz"],
                      "face": ["weights_xyz"]}
2653 
2654  def __init__(self, node):
2655 
2656  super().__init__(node)
2657 
2658  # Construct a list of all the basis/diff-basis functions required
2659  # by this invoke. Each entry in the list is a dictionary holding
2660  # the shape, the function space and the 'target' function spaces
2661  # (upon which the basis functions are evaluated).
2662  self._basis_fns_basis_fns = []
2663  # The dictionary of quadrature objects passed to this invoke. Keys
2664  # are the various VALID_QUADRATURE_SHAPES, values are a list of
2665  # associated quadrature variables. (i.e. we have a list of
2666  # quadrature arguments for each shape.)
2667  self._qr_vars_qr_vars = OrderedDict()
2668  # The dict of target function spaces upon which we must provide
2669  # evaluators. Keys are the FS names, values are (FunctionSpace,
2670  # DynKernelArgument) tuples.
2671  self._eval_targets_eval_targets = OrderedDict()
2672 
2673  for call in self._calls_calls:
2674 
2675  if isinstance(call, LFRicBuiltIn) or not call.eval_shapes:
2676  # Skip this kernel if it doesn't require basis/diff basis fns
2677  continue
2678 
2679  for shape, rule in call.qr_rules.items():
2680 
2681  # This kernel requires quadrature
2682  if shape not in self._qr_vars_qr_vars:
2683  # We haven't seen a quadrature arg with this shape
2684  # before so create a dictionary entry with an
2685  # empty list
2686  self._qr_vars_qr_vars[shape] = []
2687  if rule.psy_name not in self._qr_vars_qr_vars[shape]:
2688  # Add this qr argument to the list of those that
2689  # have this shape
2690  self._qr_vars_qr_vars[shape].append(rule.psy_name)
2691 
2692  if "gh_evaluator" in call.eval_shapes:
2693  # An evaluator consists of basis or diff basis functions
2694  # for one FS evaluated on the nodes of another 'target' FS.
2695  # Make a dict of 2-tuples, each containing the
2696  # FunctionSpace and associated kernel argument for the
2697  # target FSs.
2698 
2699  # Loop over the target FS for evaluators required by this
2700  # kernel
2701  for fs_name in call.eval_targets:
2702  if fs_name not in self._eval_targets_eval_targets:
2703  # We don't already have this space in our list so
2704  # add it to the list of target spaces
2705  self._eval_targets_eval_targets[fs_name] = \
2706  call.eval_targets[fs_name]
2707 
2708  # Both quadrature and evaluators require basis and/or differential
2709  # basis functions. This helper routine populates self._basis_fns
2710  # with entries describing the basis functions required by
2711  # this call.
2712  self._setup_basis_fns_for_call_setup_basis_fns_for_call(call)
2713 
2714  @staticmethod
2715  def basis_first_dim_name(function_space):
2716  '''
2717  Get the name of the variable holding the first dimension of a
2718  basis function
2719 
2720  :param function_space: the function space the basis function is for
2721  :type function_space: :py:class:`psyclone.domain.lfric.FunctionSpace`
2722  :return: a Fortran variable name
2723  :rtype: str
2724 
2725  '''
2726  return "dim_" + function_space.mangled_name
2727 
2728  @staticmethod
2729  def basis_first_dim_value(function_space):
2730  '''
2731  Get the size of the first dimension of a basis function.
2732 
2733  :param function_space: the function space the basis function is for
2734  :type function_space: :py:class:`psyclone.domain.lfric.FunctionSpace`
2735  :return: an integer length.
2736  :rtype: string
2737 
2738  :raises GenerationError: if an unsupported function space is supplied \
2739  (e.g. ANY_SPACE_*, ANY_DISCONTINUOUS_SPACE_*)
2740  '''
2741  if function_space.has_scalar_basis:
2742  first_dim = "1"
2743  elif function_space.has_vector_basis:
2744  first_dim = "3"
2745  else:
2746  # It is not possible to determine explicitly the first basis
2747  # function array dimension from the metadata for any_space or
2748  # any_discontinuous_space. This information needs to be passed
2749  # from the PSy layer to the kernels (see issue #461).
2750  const = LFRicConstants()
2751  raise GenerationError(
2752  f"Unsupported space for basis function, "
2753  f"expecting one of {const.VALID_FUNCTION_SPACES} but found "
2754  f"'{function_space.orig_name}'")
2755  return first_dim
2756 
2757  @staticmethod
2758  def diff_basis_first_dim_name(function_space):
2759  '''
2760  Get the name of the variable holding the first dimension of a
2761  differential basis function.
2762 
2763  :param function_space: the function space the diff-basis function \
2764  is for.
2765  :type function_space: :py:class:`psyclone.domain.lfric.FunctionSpace`
2766  :return: a Fortran variable name.
2767  :rtype: str
2768 
2769  '''
2770  return "diff_dim_" + function_space.mangled_name
2771 
2772  @staticmethod
2773  def diff_basis_first_dim_value(function_space):
2774  '''
2775  Get the size of the first dimension of an array for a
2776  differential basis function.
2777 
2778  :param function_space: the function space the diff-basis function \
2779  is for.
2780  :type function_space: :py:class:`psyclone.domain.lfric.FunctionSpace`
2781  :return: an integer length.
2782  :rtype: str
2783 
2784  :raises GenerationError: if an unsupported function space is \
2785  supplied (e.g. ANY_SPACE_*, \
2786  ANY_DISCONTINUOUS_SPACE_*)
2787 
2788  '''
2789  if function_space.has_scalar_diff_basis:
2790  first_dim = "1"
2791  elif function_space.has_vector_diff_basis:
2792  first_dim = "3"
2793  else:
2794  # It is not possible to determine explicitly the first
2795  # differential basis function array dimension from the metadata
2796  # for any_space or any_discontinuous_space. This information
2797  # needs to be passed from the PSy layer to the kernels
2798  # (see issue #461).
2799  const = LFRicConstants()
2800  raise GenerationError(
2801  f"Unsupported space for differential basis function, "
2802  f"expecting one of {const.VALID_FUNCTION_SPACES} but found "
2803  f"'{function_space.orig_name}'")
2804  return first_dim
2805 
2806  def _setup_basis_fns_for_call(self, call):
2807  '''
2808  Populates self._basis_fns with entries describing the basis
2809  functions required by the supplied Call.
2810 
2811  :param call: the kernel call for which basis functions are required.
2812  :type call: :py:class:`psyclone.domain.lfric.LFRicKern`
2813 
2814  :raises InternalError: if the supplied call is of incorrect type.
2815  :raises InternalError: if the supplied call has an unrecognised \
2816  evaluator shape.
2817  '''
2818  if not isinstance(call, LFRicKern):
2819  raise InternalError(f"Expected a LFRicKern object but got: "
2820  f"'{type(call)}'")
2821  const = LFRicConstants()
2822  # We need a full FunctionSpace object for each function space
2823  # that has basis functions associated with it.
2824  for fsd in call.fs_descriptors.descriptors:
2825 
2826  # We need the full FS object, not just the name. Therefore
2827  # we first have to get a kernel argument that is on this
2828  # space...
2829  arg, fspace = call.arguments.get_arg_on_space_name(fsd.fs_name)
2830 
2831  for shape in call.eval_shapes:
2832 
2833  # Populate a dict with the shape, function space and
2834  # associated kernel argument for this basis/diff-basis f'n.
2835  entry = {"shape": shape,
2836  "fspace": fspace,
2837  "arg": arg}
2838  if shape in const.VALID_QUADRATURE_SHAPES:
2839  # This is for quadrature - store the name of the
2840  # qr variable
2841  entry["qr_var"] = call.qr_rules[shape].psy_name
2842  # Quadrature weights are evaluated at pre-determined
2843  # points rather than at the nodes of another FS.
2844  # We put one entry of None in the list of target
2845  # spaces to facilitate cases where we loop over
2846  # this list.
2847  entry["nodal_fspaces"] = [None]
2848  elif shape == "gh_evaluator":
2849  # This is an evaluator
2850  entry["qr_var"] = None
2851  # Store a list of the FunctionSpace objects for which
2852  # these basis functions are to be evaluated
2853  entry["nodal_fspaces"] = [items[0] for items in
2854  call.eval_targets.values()]
2855  else:
2856  raise InternalError(f"Unrecognised evaluator shape: "
2857  f"'{shape}'. Should be one of "
2858  f"{const.VALID_EVALUATOR_SHAPES}")
2859 
2860  # Add our newly-constructed dict object to the list describing
2861  # the required basis and/or differential basis functions for
2862  # this Invoke.
2863  if fsd.requires_basis:
2864  entry["type"] = "basis"
2865  self._basis_fns_basis_fns.append(entry)
2866  if fsd.requires_diff_basis:
2867  # Take a shallow copy of the dict and just modify the
2868  # 'type' of the basis function it describes (this works
2869  # because the 'type' entry is a primitive type [str]).
2870  diff_entry = entry.copy()
2871  diff_entry["type"] = "diff-basis"
2872  self._basis_fns_basis_fns.append(diff_entry)
2873 
2874  def _stub_declarations(self, parent):
2875  '''
2876  Insert the variable declarations required by the basis functions into
2877  the Kernel stub.
2878 
2879  :param parent: the f2pygen node representing the Kernel stub.
2880  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
2881 
2882  :raises InternalError: if an unsupported quadrature shape is found.
2883 
2884  '''
2885  api_config = Config.get().api_conf("dynamo0.3")
2886 
2887  if not self._qr_vars_qr_vars and not self._eval_targets_eval_targets:
2888  return
2889 
2890  # The quadrature shapes that this method supports
2891  supported_shapes = ["gh_quadrature_xyoz", "gh_quadrature_face",
2892  "gh_quadrature_edge"]
2893 
2894  # Get the lists of dimensioning variables and basis arrays
2895  var_dims, basis_arrays = self._basis_fn_declns_basis_fn_declns()
2896 
2897  if var_dims:
2898  parent.add(DeclGen(parent, datatype="integer",
2899  kind=api_config.default_kind["integer"],
2900  intent="in", entity_decls=var_dims))
2901  for basis in basis_arrays:
2902  parent.add(DeclGen(parent, datatype="real",
2903  kind=api_config.default_kind["real"],
2904  intent="in",
2905  dimension=",".join(basis_arrays[basis]),
2906  entity_decls=[basis]))
2907 
2908  const = LFRicConstants()
2909 
2910  for shape in self._qr_vars_qr_vars:
2911  qr_name = "_qr_" + shape.split("_")[-1]
2912  if shape == "gh_quadrature_xyoz":
2913  datatype = const.QUADRATURE_TYPE_MAP[shape]["intrinsic"]
2914  kind = const.QUADRATURE_TYPE_MAP[shape]["kind"]
2915  parent.add(DeclGen(
2916  parent, datatype=datatype, kind=kind,
2917  intent="in", dimension="np_xy"+qr_name,
2918  entity_decls=["weights_xy"+qr_name]))
2919  parent.add(DeclGen(
2920  parent, datatype=datatype, kind=kind,
2921  intent="in", dimension="np_z"+qr_name,
2922  entity_decls=["weights_z"+qr_name]))
2923  elif shape == "gh_quadrature_face":
2924  parent.add(DeclGen(
2925  parent,
2926  datatype=const.QUADRATURE_TYPE_MAP[shape]["intrinsic"],
2927  kind=const.QUADRATURE_TYPE_MAP[shape]["kind"], intent="in",
2928  dimension=",".join(["np_xyz"+qr_name, "nfaces"+qr_name]),
2929  entity_decls=["weights_xyz"+qr_name]))
2930  elif shape == "gh_quadrature_edge":
2931  parent.add(DeclGen(
2932  parent,
2933  datatype=const.QUADRATURE_TYPE_MAP[shape]["intrinsic"],
2934  kind=const.QUADRATURE_TYPE_MAP[shape]["kind"], intent="in",
2935  dimension=",".join(["np_xyz"+qr_name, "nedges"+qr_name]),
2936  entity_decls=["weights_xyz"+qr_name]))
2937  else:
2938  raise InternalError(
2939  f"Quadrature shapes other than {supported_shapes} are not "
2940  f"yet supported - got: '{shape}'")
2941 
2942  def _invoke_declarations(self, parent):
2943  '''
2944  Add basis-function declarations to the PSy layer.
2945 
2946  :param parent: f2pygen node represening the PSy-layer routine.
2947  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
2948 
2949  '''
2950  # Create a single declaration for each quadrature type
2951  const = LFRicConstants()
2952  for shape in const.VALID_QUADRATURE_SHAPES:
2953  if shape in self._qr_vars_qr_vars and self._qr_vars_qr_vars[shape]:
2954  # The PSy-layer routine is passed objects of
2955  # quadrature_* type
2956  parent.add(
2957  TypeDeclGen(parent,
2958  datatype=const.
2959  QUADRATURE_TYPE_MAP[shape]["type"],
2960  entity_decls=self._qr_vars_qr_vars[shape],
2961  intent="in"))
2962  # For each of these we'll need a corresponding proxy, use
2963  # the symbol_table to avoid clashes...
2964  var_names = []
2965  for var in self._qr_vars_qr_vars[shape]:
2966  var_names.append(
2967  self._symbol_table_symbol_table.find_or_create_tag(var+"_proxy")
2968  .name)
2969  parent.add(
2970  TypeDeclGen(
2971  parent,
2972  datatype=const.
2973  QUADRATURE_TYPE_MAP[shape]["proxy_type"],
2974  entity_decls=var_names))
2975 
2976  def initialise(self, parent):
2977  '''
2978  Create the declarations and assignments required for the
2979  basis-functions required by an invoke. These are added as children
2980  of the supplied parent node in the AST.
2981 
2982  :param parent: the node in the f2pygen AST that will be the
2983  parent of all of the declarations and assignments.
2984  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
2985 
2986  :raises InternalError: if an invalid entry is encountered in the \
2987  self._basis_fns list.
2988  '''
2989  # pylint: disable=too-many-branches, too-many-locals
2990  api_config = Config.get().api_conf("dynamo0.3")
2991  const = LFRicConstants()
2992  basis_declarations = []
2993 
2994  # We need BASIS and/or DIFF_BASIS if any kernel requires quadrature
2995  # or an evaluator
2996  if self._qr_vars_qr_vars or self._eval_targets_eval_targets:
2997  parent.add(
2998  UseGen(parent, name=const.
2999  FUNCTION_SPACE_TYPE_MAP["function_space"]["module"],
3000  only=True, funcnames=["BASIS", "DIFF_BASIS"]))
3001 
3002  if self._qr_vars_qr_vars:
3003  parent.add(CommentGen(parent, ""))
3004  parent.add(CommentGen(parent, " Look-up quadrature variables"))
3005  parent.add(CommentGen(parent, ""))
3006 
3007  # Look-up the module- and type-names from the QUADRATURE_TYPE_MAP
3008  for shp in self._qr_vars_qr_vars:
3009  quad_map = const.QUADRATURE_TYPE_MAP[shp]
3010  parent.add(UseGen(parent,
3011  name=quad_map["module"],
3012  only=True,
3013  funcnames=[quad_map["type"],
3014  quad_map["proxy_type"]]))
3015  self._initialise_xyz_qr_initialise_xyz_qr(parent)
3016  self._initialise_xyoz_qr_initialise_xyoz_qr(parent)
3017  self._initialise_xoyoz_qr_initialise_xoyoz_qr(parent)
3018  self._initialise_face_or_edge_qr_initialise_face_or_edge_qr(parent, "face")
3019  self._initialise_face_or_edge_qr_initialise_face_or_edge_qr(parent, "edge")
3020 
3021  if self._eval_targets_eval_targets:
3022  parent.add(CommentGen(parent, ""))
3023  parent.add(CommentGen(parent,
3024  " Initialise evaluator-related quantities "
3025  "for the target function spaces"))
3026  parent.add(CommentGen(parent, ""))
3027 
3028  for (fspace, arg) in self._eval_targets_eval_targets.values():
3029  # We need the list of nodes for each unique FS upon which we need
3030  # to evaluate basis/diff-basis functions
3031  nodes_name = "nodes_" + fspace.mangled_name
3032  parent.add(AssignGen(
3033  parent, lhs=nodes_name,
3034  rhs="%".join([arg.proxy_name_indexed, arg.ref_name(fspace),
3035  "get_nodes()"]),
3036  pointer=True))
3037  my_kind = api_config.default_kind["real"]
3038  parent.add(DeclGen(parent, datatype="real",
3039  kind=my_kind,
3040  pointer=True,
3041  entity_decls=[nodes_name+"(:,:) => null()"]))
3042  const_mod = const.UTILITIES_MOD_MAP["constants"]["module"]
3043  const_mod_uses = self._invoke_invoke.invokes.psy. \
3044  infrastructure_modules[const_mod]
3045  # Record that we will need to import the kind for a
3046  # pointer declaration (associated with a function
3047  # space) from the appropriate infrastructure module
3048  const_mod_uses.add(my_kind)
3049 
3050  if self._basis_fns_basis_fns:
3051  parent.add(CommentGen(parent, ""))
3052  parent.add(CommentGen(parent, " Allocate basis/diff-basis arrays"))
3053  parent.add(CommentGen(parent, ""))
3054 
3055  var_dim_list = []
3056  for basis_fn in self._basis_fns_basis_fns:
3057  # Get the extent of the first dimension of the basis array.
3058  if basis_fn['type'] == "basis":
3059  first_dim = self.basis_first_dim_namebasis_first_dim_name(basis_fn["fspace"])
3060  dim_space = "get_dim_space()"
3061  elif basis_fn['type'] == "diff-basis":
3062  first_dim = self.diff_basis_first_dim_namediff_basis_first_dim_name(
3063  basis_fn["fspace"])
3064  dim_space = "get_dim_space_diff()"
3065  else:
3066  raise InternalError(
3067  f"Unrecognised type of basis function: "
3068  f"'{basis_fn['''type''']}'. Should be either 'basis' or "
3069  f"'diff-basis'.")
3070 
3071  if first_dim not in var_dim_list:
3072  var_dim_list.append(first_dim)
3073  rhs = "%".join(
3074  [basis_fn["arg"].proxy_name_indexed,
3075  basis_fn["arg"].ref_name(basis_fn["fspace"]),
3076  dim_space])
3077  parent.add(AssignGen(parent, lhs=first_dim, rhs=rhs))
3078 
3079  var_dims, basis_arrays = self._basis_fn_declns_basis_fn_declns()
3080 
3081  if var_dims:
3082  # declare dim and diff_dim for all function spaces
3083  parent.add(DeclGen(parent, datatype="integer",
3084  kind=api_config.default_kind["integer"],
3085  entity_decls=var_dims))
3086 
3087  basis_declarations = []
3088  for basis in basis_arrays:
3089  parent.add(
3090  AllocateGen(parent,
3091  basis+"("+", ".join(basis_arrays[basis])+")"))
3092  basis_declarations.append(
3093  basis+"("+",".join([":"]*len(basis_arrays[basis]))+")")
3094 
3095  # declare the basis function arrays
3096  if basis_declarations:
3097  my_kind = api_config.default_kind["real"]
3098  parent.add(DeclGen(parent, datatype="real", kind=my_kind,
3099  allocatable=True,
3100  entity_decls=basis_declarations))
3101  # Default kind (r_def) will always already exist due to
3102  # arrays associated with gh_shape, so there is no need to
3103  # declare it here.
3104 
3105  # Compute the values for any basis arrays
3106  self._compute_basis_fns_compute_basis_fns(parent)
3107 
def _basis_fn_declns(self):
    '''
    Extracts all information relating to the necessary declarations
    for basis-function arrays.

    :returns: a 2-tuple containing a list of dimensioning variables and a
        dict of basis arrays (keyed by array name, each entry being the
        list of dimension expressions for that array).
    :rtype: (list of str, dict)

    :raises InternalError: if neither self._invoke nor self._kernel are set.
    :raises InternalError: if an unrecognised type of basis function is
        encountered.
    :raises InternalError: if an unrecognised evaluator shape is
        encountered.
    :raises InternalError: if there is no name for the quadrature object
        when generating PSy-layer code.

    '''
    # pylint: disable=too-many-branches
    # Dictionary of basis arrays where key values are the array names and
    # entries are a list of dimensions.
    basis_arrays = OrderedDict()
    # List of names of dimensioning (scalar) variables.
    var_dim_list = []

    const = LFRicConstants()
    # Loop over the list of dicts describing each basis function
    # required by this Invoke.
    for basis_fn in self._basis_fns:
        # Get the extent of the first dimension of the basis array and
        # store whether we have a basis or a differential basis function.
        # Currently there are only those two possible types of basis
        # function and we store the required name in basis_name.
        if basis_fn['type'] == "basis":
            if self._invoke:
                # PSy-layer code: the first dimension is a variable name.
                first_dim = self.basis_first_dim_name(basis_fn["fspace"])
            elif self._kernel:
                # Kernel stub: the first dimension is a numerical value.
                first_dim = self.basis_first_dim_value(basis_fn["fspace"])
            else:
                raise InternalError("Require basis functions but do not "
                                    "have either a Kernel or an "
                                    "Invoke. Should be impossible.")
            basis_name = "gh_basis"
        elif basis_fn['type'] == "diff-basis":
            if self._invoke:
                first_dim = self.diff_basis_first_dim_name(
                    basis_fn["fspace"])
            elif self._kernel:
                first_dim = self.diff_basis_first_dim_value(
                    basis_fn["fspace"])
            else:
                raise InternalError("Require differential basis functions "
                                    "but do not have either a Kernel or "
                                    "an Invoke. Should be impossible.")
            basis_name = "gh_diff_basis"
        else:
            raise InternalError(
                f"Unrecognised type of basis function: "
                f"'{basis_fn['type']}'. Should be either 'basis' or "
                f"'diff-basis'.")

        # Only the PSy layer needs a declaration for the dimensioning
        # variable (a kernel stub uses a literal value instead).
        if self._invoke and first_dim not in var_dim_list:
            var_dim_list.append(first_dim)

        if basis_fn["shape"] in const.VALID_QUADRATURE_SHAPES:

            qr_var = basis_fn["qr_var"]
            if not qr_var:
                raise InternalError(
                    f"Quadrature '{basis_fn['shape']}' is required but"
                    f" have no name for the associated Quadrature object.")

            op_name = basis_fn["fspace"].get_operator_name(basis_name,
                                                           qr_var=qr_var)
            if op_name in basis_arrays:
                # We've already seen a basis with this name so skip.
                continue

            # Dimensionality of the basis arrays depends on the
            # type of quadrature...
            alloc_args = qr_basis_alloc_args(first_dim, basis_fn)
            for arg in alloc_args:
                # In a kernel stub the first dimension of the array is
                # a numerical value so make sure we don't try and declare
                # it as a variable.
                if not arg[0].isdigit() and arg not in var_dim_list:
                    var_dim_list.append(arg)
            basis_arrays[op_name] = alloc_args

        elif basis_fn["shape"].lower() == "gh_evaluator":
            # This is an evaluator and thus may be required on more than
            # one function space.
            for target_space in basis_fn["nodal_fspaces"]:
                op_name = basis_fn["fspace"].\
                    get_operator_name(basis_name,
                                      qr_var=basis_fn["qr_var"],
                                      on_space=target_space)
                if op_name in basis_arrays:
                    continue
                # We haven't seen a basis with this name before so
                # need to store its dimensions.
                basis_arrays[op_name] = [
                    first_dim,
                    basis_fn["fspace"].ndf_name,
                    target_space.ndf_name]
        else:
            raise InternalError(
                f"Unrecognised evaluator shape: '{basis_fn['shape']}'."
                f" Should be one of {const.VALID_EVALUATOR_SHAPES}")

    return (var_dim_list, basis_arrays)
3219 
3220  def _initialise_xyz_qr(self, parent):
3221  '''
3222  Add in the initialisation of variables needed for XYZ
3223  quadrature
3224 
3225  :param parent: the node in the AST representing the PSy subroutine
3226  in which to insert the initialisation
3227  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
3228 
3229  '''
3230  # pylint: disable=unused-argument
3231  # This shape is not yet supported so we do nothing
3232  return
3233 
def _initialise_xyoz_qr(self, parent):
    '''
    Add in the initialisation of variables needed for XYoZ
    quadrature.

    :param parent: the node in the AST representing the PSy subroutine
        in which to insert the initialisation.
    :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`

    '''
    api_config = Config.get().api_conf("dynamo0.3")

    if "gh_quadrature_xyoz" not in self._qr_vars:
        # No XYoZ quadrature in this Invoke - nothing to do.
        return

    for qr_arg_name in self._qr_vars["gh_quadrature_xyoz"]:

        # We generate unique names for the integers holding the numbers
        # of quadrature points by appending the name of the quadrature
        # argument.
        parent.add(
            DeclGen(
                parent, datatype="integer",
                kind=api_config.default_kind["integer"],
                entity_decls=[name+"_"+qr_arg_name
                              for name in self.qr_dim_vars["xyoz"]]))
        # Pointer declarations for the weights arrays.
        decl_list = [name+"_"+qr_arg_name+"(:) => null()"
                     for name in self.qr_weight_vars["xyoz"]]
        const = LFRicConstants()
        datatype = \
            const.QUADRATURE_TYPE_MAP["gh_quadrature_xyoz"]["intrinsic"]
        kind = const.QUADRATURE_TYPE_MAP["gh_quadrature_xyoz"]["kind"]
        parent.add(
            DeclGen(parent, datatype=datatype, kind=kind,
                    pointer=True, entity_decls=decl_list))
        const_mod = const.UTILITIES_MOD_MAP["constants"]["module"]
        const_mod_uses = self._invoke.invokes.psy. \
            infrastructure_modules[const_mod]
        # Record that we will need to import the kind for a
        # declaration (associated with quadrature) from
        # the appropriate infrastructure module.
        const_mod_uses.add(kind)

        # Get the quadrature proxy.
        proxy_name = qr_arg_name + "_proxy"
        parent.add(
            AssignGen(parent, lhs=proxy_name,
                      rhs=qr_arg_name+"%"+"get_quadrature_proxy()"))
        # Number of points in each dimension.
        for qr_var in self.qr_dim_vars["xyoz"]:
            parent.add(
                AssignGen(parent, lhs=qr_var+"_"+qr_arg_name,
                          rhs=proxy_name+"%"+qr_var))
        # Pointers to the weights arrays.
        for qr_var in self.qr_weight_vars["xyoz"]:
            parent.add(
                AssignGen(parent, pointer=True,
                          lhs=qr_var+"_"+qr_arg_name,
                          rhs=proxy_name+"%"+qr_var))
3293 
3294  def _initialise_xoyoz_qr(self, parent):
3295  '''
3296  Add in the initialisation of variables needed for XoYoZ
3297  quadrature.
3298 
3299  :param parent: the node in the AST representing the PSy subroutine \
3300  in which to insert the initialisation.
3301  :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
3302 
3303  '''
3304  # pylint: disable=unused-argument
3305  # This shape is not yet supported so we do nothing
3306  return
3307 
def _initialise_face_or_edge_qr(self, parent, qr_type):
    '''
    Add in the initialisation of variables needed for face or edge
    quadrature.

    :param parent: the node in the AST representing the PSy subroutine
        in which to insert the initialisation.
    :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`
    :param str qr_type: whether to generate initialisation code for
        "face" or "edge" quadrature.

    :raises InternalError: if `qr_type` is not "face" or "edge".

    '''
    if qr_type not in ["face", "edge"]:
        raise InternalError(
            f"_initialise_face_or_edge_qr: qr_type argument must be "
            f"either 'face' or 'edge' but got: '{qr_type}'")

    quadrature_name = f"gh_quadrature_{qr_type}"

    if quadrature_name not in self._qr_vars:
        # This Invoke has no quadrature of the requested type.
        return

    api_config = Config.get().api_conf("dynamo0.3")
    symbol_table = self._symbol_table

    for qr_arg_name in self._qr_vars[quadrature_name]:
        # We generate unique names for the integers holding the numbers
        # of quadrature points by appending the name of the quadrature
        # argument.
        decl_list = [
            symbol_table.find_or_create_integer_symbol(
                name+"_"+qr_arg_name, tag=name+"_"+qr_arg_name).name
            for name in self.qr_dim_vars[qr_type]]
        parent.add(DeclGen(parent, datatype="integer",
                           kind=api_config.default_kind["integer"],
                           entity_decls=decl_list))

        # Pointer declarations for the 2D weights arrays.
        names = [f"{name}_{qr_arg_name}"
                 for name in self.qr_weight_vars[qr_type]]
        decl_list = [
            symbol_table.find_or_create_array(name, 2,
                                              ScalarType.Intrinsic.REAL,
                                              tag=name).name
            + "(:,:) => null()" for name in names]
        const = LFRicConstants()
        datatype = const.QUADRATURE_TYPE_MAP[quadrature_name]["intrinsic"]
        kind = const.QUADRATURE_TYPE_MAP[quadrature_name]["kind"]
        parent.add(
            DeclGen(parent, datatype=datatype, pointer=True, kind=kind,
                    entity_decls=decl_list))
        const_mod = const.UTILITIES_MOD_MAP["constants"]["module"]
        const_mod_uses = self._invoke.invokes.psy. \
            infrastructure_modules[const_mod]
        # Record that we will need to import the kind for a
        # declaration (associated with quadrature) from the
        # appropriate infrastructure module.
        const_mod_uses.add(kind)
        # Get the quadrature proxy.
        proxy_name = symbol_table.find_or_create_tag(
            qr_arg_name+"_proxy").name
        parent.add(
            AssignGen(parent, lhs=proxy_name,
                      rhs=qr_arg_name+"%"+"get_quadrature_proxy()"))
        # The dimensioning variables required for this quadrature
        # (e.g. nedges/nfaces, np_xyz).
        for qr_var in self.qr_dim_vars[qr_type]:
            parent.add(
                AssignGen(parent, lhs=qr_var+"_"+qr_arg_name,
                          rhs=proxy_name+"%"+qr_var))
        # Pointers to the weights arrays.
        for qr_var in self.qr_weight_vars[qr_type]:
            parent.add(
                AssignGen(parent, pointer=True,
                          lhs=qr_var+"_"+qr_arg_name,
                          rhs=proxy_name+"%"+qr_var))
3385 
def _compute_basis_fns(self, parent):
    '''
    Generates the necessary Fortran to compute the values of
    any basis/diff-basis arrays required.

    :param parent: Node in the f2pygen AST which will be the parent
        of the assignments created in this routine.
    :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`

    :raises InternalError: if an unrecognised type of basis function or
        an unrecognised evaluator shape is encountered.

    '''
    # pylint: disable=too-many-locals
    const = LFRicConstants()
    api_config = Config.get().api_conf("dynamo0.3")

    loop_var_list = set()
    op_name_list = []
    # Add calls to compute the values of any basis arrays.
    if self._basis_fns:
        parent.add(CommentGen(parent, ""))
        parent.add(CommentGen(parent, " Compute basis/diff-basis arrays"))
        parent.add(CommentGen(parent, ""))

    for basis_fn in self._basis_fns:

        # Currently there are only two possible types of basis function
        # and we store the corresponding strings to use in basis_name,
        # basis_type, and first_dim. If support for other basis function
        # types is added in future then more tests need to be added here.
        if basis_fn["type"] == "diff-basis":
            basis_name = "gh_diff_basis"
            basis_type = "DIFF_BASIS"
            first_dim = self.diff_basis_first_dim_name(basis_fn["fspace"])
        elif basis_fn["type"] == "basis":
            basis_name = "gh_basis"
            basis_type = "BASIS"
            first_dim = self.basis_first_dim_name(basis_fn["fspace"])
        else:
            raise InternalError(
                f"Unrecognised type of basis function: "
                f"'{basis_fn['type']}'. Expected one of 'basis' or "
                f"'diff-basis'.")
        if basis_fn["shape"] in const.VALID_QUADRATURE_SHAPES:
            op_name = basis_fn["fspace"].\
                get_operator_name(basis_name, qr_var=basis_fn["qr_var"])
            if op_name in op_name_list:
                # Jump over any basis arrays we've seen before.
                continue
            op_name_list.append(op_name)

            # Create the argument list.
            args = [basis_type, basis_fn["arg"].proxy_name_indexed + "%" +
                    basis_fn["arg"].ref_name(basis_fn["fspace"]),
                    first_dim, basis_fn["fspace"].ndf_name, op_name]

            # Insert the basis array call.
            parent.add(
                CallGen(parent,
                        name=basis_fn["qr_var"]+"%compute_function",
                        args=args))
        elif basis_fn["shape"].lower() == "gh_evaluator":
            # We have an evaluator. We may need this on more than one
            # function space.
            for space in basis_fn["nodal_fspaces"]:
                op_name = basis_fn["fspace"].\
                    get_operator_name(basis_name, on_space=space)
                if op_name in op_name_list:
                    # Jump over any basis arrays we've seen before.
                    continue
                op_name_list.append(op_name)

                nodal_loop_var = "df_nodal"
                loop_var_list.add(nodal_loop_var)

                # Loop over dofs of target function space.
                nodal_dof_loop = DoGen(
                    parent, nodal_loop_var, "1", space.ndf_name)
                parent.add(nodal_dof_loop)

                dof_loop_var = "df_" + basis_fn["fspace"].mangled_name
                loop_var_list.add(dof_loop_var)

                # Inner loop over dofs of the originating function space.
                dof_loop = DoGen(nodal_dof_loop, dof_loop_var,
                                 "1", basis_fn["fspace"].ndf_name)
                nodal_dof_loop.add(dof_loop)
                lhs = op_name + "(:," + "df_" + \
                    basis_fn["fspace"].mangled_name + "," + "df_nodal)"
                rhs = (f"{basis_fn['arg'].proxy_name_indexed}%"
                       f"{basis_fn['arg'].ref_name(basis_fn['fspace'])}%"
                       f"call_function({basis_type},{dof_loop_var},nodes_"
                       f"{space.mangled_name}(:,{nodal_loop_var}))")
                dof_loop.add(AssignGen(dof_loop, lhs=lhs, rhs=rhs))
        else:
            raise InternalError(
                f"Unrecognised shape '{basis_fn['shape']}' specified "
                f"for basis function. Should be one of: "
                f"{const.VALID_EVALUATOR_SHAPES}")
    if loop_var_list:
        # Declare any loop variables.
        parent.add(DeclGen(parent, datatype="integer",
                           kind=api_config.default_kind["integer"],
                           entity_decls=sorted(loop_var_list)))
3487 
def deallocate(self, parent):
    '''
    Add code to deallocate all basis/diff-basis function arrays.

    :param parent: node in the f2pygen AST to which the deallocate
        calls will be added.
    :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`

    :raises InternalError: if an unrecognised type of basis function
        is encountered.
    '''
    if self._basis_fns:
        # Deallocate all allocated basis function arrays.
        parent.add(CommentGen(parent, ""))
        parent.add(CommentGen(parent, " Deallocate basis arrays"))
        parent.add(CommentGen(parent, ""))

    func_space_var_names = set()
    for basis_fn in self._basis_fns:
        # Add the basis array name to the list to use later.
        if basis_fn["type"] == "basis":
            basis_name = "gh_basis"
        elif basis_fn["type"] == "diff-basis":
            basis_name = "gh_diff_basis"
        else:
            raise InternalError(
                f"Unrecognised type of basis function: "
                f"'{basis_fn['type']}'. Should be one of 'basis' or "
                f"'diff-basis'.")
        for fspace in basis_fn["nodal_fspaces"]:
            op_name = basis_fn["fspace"].\
                get_operator_name(basis_name,
                                  qr_var=basis_fn["qr_var"],
                                  on_space=fspace)
            func_space_var_names.add(op_name)

    if func_space_var_names:
        # Add the required deallocate call.
        parent.add(DeallocateGen(parent, sorted(func_space_var_names)))
3527 
3528 
class DynBoundaryConditions(LFRicCollection):
    '''
    Manages declarations and initialisation of quantities required by
    kernels that need boundary condition information.

    NOTE(review): the original class-header line was lost in extraction;
    the name and base class are reconstructed - confirm against upstream.

    :param node: the Invoke or Kernel stub for which we are to handle
        any boundary conditions.
    :type node: :py:class:`psyclone.dynamo0p3.LFRicInvoke` or
        :py:class:`psyclone.domain.lfric.LFRicKern`

    :raises GenerationError: if a kernel named "enforce_bc_code" is found
        but does not have an argument on ANY_SPACE_1.
    :raises GenerationError: if a kernel named "enforce_operator_bc_code" is
        found but does not have exactly one argument.
    '''
    # Define a BoundaryDofs namedtuple to help us manage the arrays that
    # are required.
    BoundaryDofs = namedtuple("BoundaryDofs", ["argument", "function_space"])
3547 
def __init__(self, node):
    super().__init__(node)

    self._boundary_dofs = []
    # Check through all the kernel calls to see whether any of them
    # require boundary conditions. Currently this is done by recognising
    # the kernel name.
    # pylint: disable=import-outside-toplevel
    from psyclone.domain.lfric.metadata_to_arguments_rules import (
        MetadataToArgumentsRules)
    for call in self._calls:
        if MetadataToArgumentsRules.bc_kern_regex.match(call.name):
            # A field boundary-condition kernel: find its ANY_SPACE_1 arg.
            bc_fs = None
            for fspace in call.arguments.unique_fss:
                if fspace.orig_name == "any_space_1":
                    bc_fs = fspace
                    break
            if not bc_fs:
                raise GenerationError(
                    "The enforce_bc_code kernel must have an argument on "
                    "ANY_SPACE_1 but failed to find such an argument.")
            farg = call.arguments.get_arg_on_space(bc_fs)
            self._boundary_dofs.append(self.BoundaryDofs(farg, bc_fs))
        elif call.name.lower() == "enforce_operator_bc_code":
            # Check that the kernel only has one argument.
            if len(call.arguments.args) != 1:
                raise GenerationError(
                    f"The enforce_operator_bc_code kernel must have "
                    f"exactly one argument but found "
                    f"{len(call.arguments.args)}")
            op_arg = call.arguments.args[0]
            # For an operator, boundary dofs are on the "to" space.
            bc_fs = op_arg.function_space_to
            self._boundary_dofs.append(self.BoundaryDofs(op_arg, bc_fs))
3581 
def _invoke_declarations(self, parent):
    '''
    Add declarations for any boundary-dofs arrays required by an Invoke.

    :param parent: node in the PSyIR to which to add declarations.
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    '''
    api_config = Config.get().api_conf("dynamo0.3")

    for dofs in self._boundary_dofs:
        name = "boundary_dofs_" + dofs.argument.name
        # Boundary-dofs arrays are pointers obtained from the proxy at
        # run time, hence the pointer declaration initialised to null().
        parent.add(DeclGen(parent, datatype="integer",
                           kind=api_config.default_kind["integer"],
                           pointer=True,
                           entity_decls=[name+"(:,:) => null()"]))
3598 
def _stub_declarations(self, parent):
    '''
    Add declarations for any boundary-dofs arrays required by a kernel.

    :param parent: node in the PSyIR to which to add declarations.
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    '''
    api_config = Config.get().api_conf("dynamo0.3")

    for dofs in self._boundary_dofs:
        name = "boundary_dofs_" + dofs.argument.name
        ndf_name = dofs.function_space.ndf_name
        # In a kernel stub the array is passed in, so it is declared
        # with intent(in) and explicit (ndf, 2) dimensions.
        parent.add(DeclGen(parent, datatype="integer",
                           kind=api_config.default_kind["integer"],
                           intent="in",
                           dimension=",".join([ndf_name, "2"]),
                           entity_decls=[name]))
3617 
def initialise(self, parent):
    '''
    Initialise any boundary-dofs arrays required by an Invoke.

    :param parent: node in PSyIR to which to add declarations.
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    '''
    for dofs in self._boundary_dofs:
        name = "boundary_dofs_" + dofs.argument.name
        # Point the array at the data returned by the proxy's
        # get_boundary_dofs() method.
        parent.add(AssignGen(
            parent, pointer=True, lhs=name,
            rhs="%".join([dofs.argument.proxy_name,
                          dofs.argument.ref_name(dofs.function_space),
                          "get_boundary_dofs()"])))
3633 
3634 
class DynInvokeSchedule(InvokeSchedule):
    ''' The Dynamo-specific InvokeSchedule sub-class. This passes the
    Dynamo-specific factories for creating kernel and infrastructure calls
    to the base class so it creates the ones we require.

    NOTE(review): the original class-header line was lost in extraction;
    the name and base class are reconstructed - confirm against upstream.

    :param str name: name of the Invoke.
    :param arg: list of KernelCalls parsed from the algorithm layer.
    :type arg: list of :py:class:`psyclone.parse.algorithm.KernelCall`
    :param reserved_names: optional list of names that are not allowed in
        the new InvokeSchedule SymbolTable.
    :type reserved_names: list of str
    :param parent: the parent of this node in the PSyIR.
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    '''

    def __init__(self, name, arg, reserved_names=None, parent=None):
        super().__init__(name, LFRicKernCallFactory,
                         LFRicBuiltInCallFactory, arg, reserved_names,
                         parent=parent, symbol_table=LFRicSymbolTable())

    def node_str(self, colour=True):
        ''' Creates a text summary of this node.

        :param bool colour: whether or not to include control codes for
            colour.

        :returns: text summary of this node, optionally with control codes
            for colour highlighting.
        :rtype: str

        '''
        return (self.coloured_name(colour) + "[invoke='" + self.invoke.name +
                "', dm=" + str(Config.get().distributed_memory)+"]")
3668 
3669 
class DynGlobalSum(GlobalSum):
    '''
    Dynamo-specific global sum class which can be added to and
    manipulated in a schedule.

    NOTE(review): the original class-header line was lost in extraction;
    the name and base class are reconstructed - confirm against upstream.

    :param scalar: the kernel argument for which to perform a global sum.
    :type scalar: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
    :param parent: the parent node of this node in the PSyIR.
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    :raises GenerationError: if distributed memory is not enabled.
    :raises InternalError: if the supplied argument is not a scalar.
    :raises GenerationError: if the scalar is not of "real" intrinsic type.

    '''
    def __init__(self, scalar, parent=None):
        # Check that distributed memory is enabled.
        if not Config.get().distributed_memory:
            raise GenerationError(
                "It makes no sense to create a DynGlobalSum object when "
                "distributed memory is not enabled (dm=False).")
        # Check that the global sum argument is indeed a scalar.
        if not scalar.is_scalar:
            raise InternalError(
                f"DynGlobalSum.init(): A global sum argument should be a "
                f"scalar but found argument of type "
                f"'{scalar.argument_type}'.")
        # Check scalar intrinsic types that this class supports (only
        # "real" for now).
        if scalar.intrinsic_type != "real":
            raise GenerationError(
                f"DynGlobalSum currently only supports real scalars, but "
                f"argument '{scalar.name}' in Kernel '{scalar.call.name}' "
                f"has '{scalar.intrinsic_type}' intrinsic type.")
        # Initialise the parent class.
        super().__init__(scalar, parent=parent)

    def gen_code(self, parent):
        '''
        Dynamo-specific code generation for this class.

        :param parent: f2pygen node to which to add AST nodes.
        :type parent: :py:class:`psyclone.f2pygen.SubroutineGen`

        '''
        name = self._scalar.name
        # Use InvokeSchedule SymbolTable to share the same symbol for all
        # GlobalSums in the Invoke.
        sum_name = self.ancestor(InvokeSchedule).symbol_table.\
            find_or_create_tag("global_sum").name
        sum_type = self._scalar.data_type
        sum_mod = self._scalar.module_name
        parent.add(UseGen(parent, name=sum_mod, only=True,
                          funcnames=[sum_type]))
        parent.add(TypeDeclGen(parent, datatype=sum_type,
                               entity_decls=[sum_name]))
        parent.add(AssignGen(parent, lhs=sum_name+"%value", rhs=name))
        parent.add(AssignGen(parent, lhs=name, rhs=sum_name+"%get_sum()"))
3727 
3728 
def _create_depth_list(halo_info_list, sym_table):
    '''Halo exchanges may have more than one dependency. This method
    simplifies multiple dependencies to remove duplicates and any
    obvious redundancy. For example, if one dependency is for depth=1
    and another for depth=2 then we do not need the former as it is
    covered by the latter. Similarly, if we have a depth=extent+1 and
    another for depth=extent+2 then we do not need the former as it is
    covered by the latter. It also takes into account
    needs_clean_outer, which indicates whether the outermost halo
    needs to be clean (and therefore whether there is a dependence).

    :param halo_info_list: a list containing halo access information
        derived from all read fields dependent on this halo exchange.
    :type halo_info_list: :func:`list` of
        :py:class:`psyclone.dynamo0p3.HaloReadAccess`
    :param sym_table: the symbol table of the enclosing InvokeSchedule.
    :type sym_table: :py:class:`psyclone.psyir.symbols.SymbolTable`

    :returns: a list containing halo depth information derived from
        the halo access information.
    :rtype: :func:`list` of :py:class:`psyclone.dynamo0p3.HaloDepth`

    '''
    # pylint: disable=too-many-branches
    depth_info_list = []
    # First look to see if all field dependencies are
    # annexed_only. If so we only care about annexed dofs.
    annexed_only = True
    for halo_info in halo_info_list:
        if not (halo_info.annexed_only or
                (halo_info.literal_depth == 1
                 and not halo_info.needs_clean_outer)):
            # There are two cases when we only care about accesses to
            # annexed dofs. 1) when annexed_only is set and 2) when
            # the halo depth is 1 but we only depend on annexed dofs
            # being up-to-date (needs_clean_outer is False).
            annexed_only = False
            break
    if annexed_only:
        depth_info = HaloDepth(sym_table)
        depth_info.set_by_value(max_depth=False, var_depth="",
                                literal_depth=1, annexed_only=True,
                                max_depth_m1=False)
        return [depth_info]
    # Next look to see if one of the field dependencies specifies
    # a max_depth access. If so the whole halo region is accessed
    # so we do not need to be concerned with other accesses.
    max_depth_m1 = False
    for halo_info in halo_info_list:
        if halo_info.max_depth:
            if halo_info.needs_clean_outer:
                # Found a max_depth access so we only need one
                # HaloDepth entry.
                depth_info = HaloDepth(sym_table)
                depth_info.set_by_value(max_depth=True, var_depth="",
                                        literal_depth=0, annexed_only=False,
                                        max_depth_m1=False)
                return [depth_info]
            # Remember that we found a max_depth-1 access.
            max_depth_m1 = True

    if max_depth_m1:
        # We have at least one max_depth-1 access.
        depth_info = HaloDepth(sym_table)
        depth_info.set_by_value(max_depth=False, var_depth="",
                                literal_depth=0, annexed_only=False,
                                max_depth_m1=True)
        depth_info_list.append(depth_info)

    for halo_info in halo_info_list:
        # Go through the halo information associated with each
        # read dependency, skipping any max_depth-1 accesses.
        if halo_info.max_depth and not halo_info.needs_clean_outer:
            continue
        var_depth = halo_info.var_depth
        literal_depth = halo_info.literal_depth
        if literal_depth and not halo_info.needs_clean_outer:
            # Decrease depth by 1 if we don't care about the outermost
            # access.
            literal_depth -= 1
        match = False
        # Check whether we match with existing depth information.
        for depth_info in depth_info_list:
            if depth_info.var_depth == var_depth and not match:
                # This dependence uses the same variable to
                # specify its depth as an existing one, or both do
                # not have a variable so we only have a
                # literal. Therefore we only need to update the
                # literal value with the maximum of the two
                # (e.g. var_name,1 and var_name,2 => var_name,2).
                depth_info.literal_depth = max(
                    depth_info.literal_depth, literal_depth)
                match = True
                break
        if not match:
            # No matches were found with existing entries so create a
            # new one (unless no 'var_depth' and 'literal_depth' is 0).
            if var_depth or literal_depth > 0:
                depth_info = HaloDepth(sym_table)
                depth_info.set_by_value(max_depth=False, var_depth=var_depth,
                                        literal_depth=literal_depth,
                                        annexed_only=False,
                                        max_depth_m1=False)
                depth_info_list.append(depth_info)
    return depth_info_list
3832 
3833 
3835 
class DynHaloExchange(HaloExchange):
    '''Dynamo-specific halo exchange class which can be added to and
    manipulated in a schedule.

    NOTE(review): the original class-header line was lost in extraction;
    the name and base class are reconstructed - confirm against upstream.

    :param field: the field that this halo exchange will act on.
    :type field: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
    :param bool check_dirty: optional argument default True indicating
        whether this halo exchange should be subject to a run-time check
        for clean/dirty halos.
    :param int vector_index: optional vector index (default None) to
        identify which index of a vector field this halo exchange is
        responsible for.
    :param parent: optional PSyIRe parent node (default None) of this
        object.
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    '''
    def __init__(self, field, check_dirty=True,
                 vector_index=None, parent=None):
        HaloExchange.__init__(self, field, check_dirty=check_dirty,
                              vector_index=vector_index, parent=parent)
        # Set up some defaults for this class.
        self._halo_exchange_name = "halo_exchange"
3860 
3861  def _compute_stencil_type(self):
3862  '''Dynamically work out the type of stencil required for this halo
3863  exchange as it could change as transformations are applied to
3864  the schedule. If all stencil accesses are of the same type then we
3865  return that stencil, otherwise we return the "region" stencil
3866  type (as it is safe for all stencils).
3867 
3868  :return: the type of stencil required for this halo exchange
3869  :rtype: str
3870 
3871  '''
3872  # get information about stencil accesses from all read fields
3873  # dependent on this halo exchange
3874  halo_info_list = self._compute_halo_read_info_compute_halo_read_info()
3875 
3876  trial_stencil = halo_info_list[0].stencil_type
3877  for halo_info in halo_info_list:
3878  # assume that if stencil accesses are different that we
3879  # simply revert to region. We could be more clever in the
3880  # future e.g. x and y implies cross.
3881  if halo_info.stencil_type != trial_stencil:
3882  return "region"
3883  return trial_stencil
3884 
3885  def _compute_halo_depth(self):
3886  '''Dynamically determine the depth of the halo for this halo exchange,
3887  as the depth can change as transformations are applied to the
3888  schedule.
3889 
3890  :return: the halo exchange depth as a Fortran string
3891  :rtype: str
3892 
3893  '''
3894  # get information about reading from the halo from all read fields
3895  # dependent on this halo exchange
3896  depth_info_list = self._compute_halo_read_depth_info_compute_halo_read_depth_info()
3897 
3898  # if there is only one entry in the list we can just return
3899  # the depth
3900  if len(depth_info_list) == 1:
3901  return str(depth_info_list[0])
3902  # the depth information can't be reduced to a single
3903  # expression, therefore we need to determine the maximum
3904  # of all expressions
3905  depth_str_list = [str(depth_info) for depth_info in
3906  depth_info_list]
3907  return "max("+",".join(depth_str_list)+")"
3908 
3909  def _psyir_depth_expression(self):
3910  '''
3911  :returns: the PSyIR expression to compute the halo depth.
3912  :rtype: :py:class:`psyclone.psyir.nodes.Node`
3913  '''
3914  depth_info_list = self._compute_halo_read_depth_info_compute_halo_read_depth_info()
3915  if len(depth_info_list) == 1:
3916  return depth_info_list[0].psyir_expression()
3917 
3918  return IntrinsicCall.create(
3919  IntrinsicCall.Intrinsic.MAX,
3920  [depth.psyir_expression() for depth in depth_info_list])
3921 
def _compute_halo_read_depth_info(self, ignore_hex_dep=False):
    '''Take a list of `psyclone.dynamo0p3.HaloReadAccess` objects and
    create an equivalent list of `psyclone.dynamo0p3.HaloDepth`
    objects. Whilst doing this we simplify the
    `psyclone.dynamo0p3.HaloDepth` list to remove redundant depth
    information e.g. depth=1 is not required if we have a depth=2.
    If the optional ignore_hex_dep argument is set to True then
    any read accesses contained in halo exchange nodes are
    ignored. This option can therefore be used to filter out any
    halo exchange dependencies and only return non-halo exchange
    dependencies if and when required.

    :param bool ignore_hex_dep: if True then ignore any read
        accesses contained in halo exchanges. This is an optional
        argument that defaults to False.

    :returns: a list containing halo depth information derived from
        all fields dependent on this halo exchange.
    :rtype: :func:`list` of :py:class:`psyclone.dynamo0p3.HaloDepth`

    '''
    # Get our halo information.
    halo_info_list = self._compute_halo_read_info(ignore_hex_dep)
    # Use the halo information to generate depth information.
    depth_info_list = _create_depth_list(halo_info_list,
                                         self._symbol_table)
    return depth_info_list
3949 
def _compute_halo_read_info(self, ignore_hex_dep=False):
    '''Dynamically computes all halo read dependencies and returns the
    required halo information (i.e. halo depth and stencil type)
    in a list of HaloReadAccess objects. If the optional
    ignore_hex_dep argument is set to True then any read accesses
    contained in halo exchange nodes are ignored. This option can
    therefore be used to filter out any halo exchange dependencies
    and only return non-halo exchange dependencies if and when
    required.

    :param bool ignore_hex_dep: if True then ignore any read \
        accesses contained in halo exchanges. This is an optional \
        argument that defaults to False.

    :return: a list containing halo information for each read dependency.
    :rtype: :func:`list` of :py:class:`psyclone.dynamo0p3.HaloReadAccess`

    :raises InternalError: if there is more than one read \
        dependency associated with a halo exchange.
    :raises InternalError: if there is a read dependency \
        associated with a halo exchange and it is not the last \
        entry in the read dependency list.
    :raises GenerationError: if there is a read dependency \
        associated with an asynchronous halo exchange.
    :raises InternalError: if no read dependencies are found.

    '''
    read_dependencies = self.field.forward_read_dependencies()
    hex_deps = [dep for dep in read_dependencies
                if isinstance(dep.call, LFRicHaloExchange)]
    if hex_deps:
        # There is a field accessed by a halo exchange that is
        # a read dependence. Asynchronous halo exchanges are not
        # supported here so raise an exception if one is found.
        if any(dep for dep in hex_deps
               if isinstance(dep.call, (LFRicHaloExchangeStart,
                                        LFRicHaloExchangeEnd))):
            raise GenerationError(
                "Please perform redundant computation transformations "
                "before asynchronous halo exchange transformations.")

        # There can only be one field accessed by a
        # halo exchange that is a read dependence.
        if len(hex_deps) != 1:
            raise InternalError(
                f"There should only ever be at most one read dependency "
                f"associated with a halo exchange in the read dependency "
                f"list, but found {len(hex_deps)} for field "
                f"{self.field.name}.")
        # For sanity, check that the field accessed by the halo
        # exchange is the last dependence in the list.
        if not isinstance(read_dependencies[-1].call, LFRicHaloExchange):
            raise InternalError(
                "If there is a read dependency associated with a halo "
                "exchange in the list of read dependencies then it should "
                "be the last one in the list.")
        if ignore_hex_dep:
            # Remove the last entry in the list (the field accessed by
            # the halo exchange).
            del read_dependencies[-1]

    if not read_dependencies:
        raise InternalError(
            "Internal logic error. There should be at least one read "
            "dependence for a halo exchange.")
    return [HaloReadAccess(read_dependency, self._symbol_table) for
            read_dependency in read_dependencies]
4017 
4018  def _compute_halo_write_info(self):
4019  '''Determines how much of the halo has been cleaned from any previous
4020  redundant computation
4021 
4022  :return: a HaloWriteAccess object containing the required \
4023  information, or None if no dependence information is found.
4024  :rtype: :py:class:`psyclone.dynamo0p3.HaloWriteAccess` or None
4025  :raises GenerationError: if more than one write dependence is \
4026  found for this halo exchange as this should not be possible
4027 
4028  '''
4029  write_dependencies = self.fieldfield.backward_write_dependencies()
4030  if not write_dependencies:
4031  # no write dependence information
4032  return None
4033  if len(write_dependencies) > 1:
4034  raise GenerationError(
4035  f"Internal logic error. There should be at most one write "
4036  f"dependence for a halo exchange. Found "
4037  f"'{len(write_dependencies)}'")
4038  return HaloWriteAccess(write_dependencies[0], self._symbol_table_symbol_table)
4039 
def required(self, ignore_hex_dep=False):
    '''Determines whether this halo exchange is definitely required
    ``(True, True)``, might be required ``(True, False)`` or is definitely
    not required ``(False, *)``.

    If the optional ignore_hex_dep argument is set to True then
    any read accesses contained in halo exchange nodes are
    ignored. This option can therefore be used to filter out any
    halo exchange dependencies and only consider non-halo exchange
    dependencies if and when required.

    Whilst a halo exchange is generally only ever added if it is
    required, or if it may be required, this situation can change
    if redundant computation transformations are applied. The
    first argument can be used to remove such halo exchanges if
    required.

    When the first return value is True, the second return value
    can be used to see if we need to rely on the runtime
    (set_dirty and set_clean calls) and therefore add a
    check_dirty() call around the halo exchange or whether we
    definitely know that this halo exchange is required.

    This routine assumes that a stencil size provided via a
    variable may take the value 0. If a variables value is
    constrained to be 1, or more, then the logic for deciding
    whether a halo exchange is definitely required should be
    updated. Note, the routine would still be correct as is, it
    would just return more unknown results than it should).

    :param bool ignore_hex_dep: if True then ignore any read \
        accesses contained in halo exchanges. This is an optional \
        argument that defaults to False.

    :returns: (x, y) where x specifies whether this halo \
        exchange is (or might be) required - True, or is not \
        required - False. If the first tuple item is True then the \
        second argument specifies whether we definitely know that \
        we need the HaloExchange - True, or are not sure - False.
    :rtype: (bool, bool)

    '''
    # pylint: disable=too-many-branches, too-many-return-statements
    # get *aggregated* information about halo reads
    required_clean_info = self._compute_halo_read_depth_info(
        ignore_hex_dep)
    # get information about the halo write
    clean_info = self._compute_halo_write_info()

    # no need to test whether we return at least one read
    # dependency as _compute_halo_read_depth_info() raises an
    # exception if none are found

    if Config.get().api_conf("dynamo0.3").compute_annexed_dofs and \
            len(required_clean_info) == 1 and \
            required_clean_info[0].annexed_only:
        # We definitely don't need the halo exchange as we
        # only read annexed dofs and these are always clean as
        # they are computed by default when iterating over
        # dofs and kept up-to-date by redundant computation
        # when iterating over cells.
        required = False
        known = True  # redundant information as it is always known
        return required, known

    if not clean_info:
        # this halo exchange has no previous write dependencies so
        # we do not know the initial state of the halo. This means
        # that we do not know if we need a halo exchange or not
        required = True
        known = False
        return required, known

    if clean_info.max_depth:
        if not clean_info.dirty_outer:
            # all of the halo is cleaned by redundant computation
            # so halo exchange is not required
            required = False
            known = True  # redundant information as it is always known
        else:
            # the last level halo is dirty
            if required_clean_info[0].max_depth:
                # we know that we need to clean the outermost halo level
                required = True
                known = True
            else:
                # we don't know whether the halo exchange is
                # required or not as the reader reads the halo to
                # a specified depth but we don't know the depth
                # of the halo
                required = True
                known = False
        return required, known

    # at this point we know that clean_info.max_depth is False

    if not clean_info.literal_depth:
        # if literal_depth is 0 then the writer does not
        # redundantly compute so we definitely need the halo
        # exchange
        required = True
        known = True
        return required, known

    if clean_info.literal_depth == 1 and clean_info.dirty_outer:
        # the writer redundantly computes in the level 1 halo but
        # leaves it dirty (although annexed dofs are now clean).
        if len(required_clean_info) == 1 and \
                required_clean_info[0].annexed_only:
            # we definitely don't need the halo exchange as we
            # only read annexed dofs and these have been made
            # clean by the redundant computation
            required = False
            known = True  # redundant information as it is always known
        else:
            # we definitely need the halo exchange as the reader(s)
            # require the halo to be clean
            required = True
            known = True
        return required, known

    # At this point we know that the writer cleans the halo to a
    # known (literal) depth through redundant computation. We now
    # compute this value for use by the logic in the rest of the
    # routine.
    clean_depth = clean_info.literal_depth
    if clean_info.dirty_outer:
        # outer layer stays dirty
        clean_depth -= 1

    # If a literal value in any of the required clean halo depths
    # is greater than the cleaned depth then we definitely need
    # the halo exchange (as any additional variable depth would
    # increase the required depth value). We only look at the case
    # where we have multiple entries as the single entry case is
    # dealt with separately
    if len(required_clean_info) > 1:
        for required_clean in required_clean_info:
            if required_clean.literal_depth > clean_depth:
                required = True
                known = True
                return required, known

    # The only other case where we know that a halo exchange is
    # required (or not) is where we read the halo to a known
    # literal depth. As the read information is aggregated, a known
    # literal depth will mean that there is only one
    # required_clean_info entry
    if len(required_clean_info) == 1:
        # the halo might be read to a fixed literal depth
        if required_clean_info[0].var_depth or \
                required_clean_info[0].max_depth:
            # no it isn't so we might need the halo exchange
            required = True
            known = False
        else:
            # the halo is read to a fixed literal depth.
            required_clean_depth = required_clean_info[0].literal_depth
            if clean_depth < required_clean_depth:
                # we definitely need this halo exchange
                required = True
                known = True
            else:
                # we definitely don't need this halo exchange
                required = False
                known = True  # redundant information as it is always known
        return required, known

    # We now know that at least one required_clean entry has a
    # variable depth and any required_clean fixed depths are less
    # than the cleaned depth so we may need a halo exchange.
    required = True
    known = False
    return required, known
4214 
def node_str(self, colour=True):
    ''' Creates a text summary of this HaloExchange node.

    :param bool colour: whether or not to include control codes for colour.

    :returns: text summary of this node, optionally with control codes \
        for colour highlighting.
    :rtype: str

    '''
    _, known = self.required()
    # A run-time dirty check is only needed when we are not certain
    # that the exchange is required.
    runtime_check = not known
    field_id = self._field.name
    if self.vector_index:
        field_id += f"({self.vector_index})"
    return (f"{self.coloured_name(colour)}[field='{field_id}', "
            f"type='{self._compute_stencil_type()}', "
            f"depth={self._compute_halo_depth()}, "
            f"check_dirty={runtime_check}]")
4234 
def gen_code(self, parent):
    '''Dynamo specific code generation for this class.

    :param parent: an f2pygen object that will be the parent of \
        f2pygen objects created in this method
    :type parent: :py:class:`psyclone.f2pygen.BaseGen`

    '''
    # Wrap this PSyIR node so that f2pygen can emit it.
    psyir_child = PSyIRGen(parent, self)
    parent.add(psyir_child)
4244 
def lower_to_language_level(self):
    '''
    :returns: this node lowered to language-level PSyIR.
    :rtype: :py:class:`psyclone.psyir.nodes.Node`

    '''
    symbol = DataSymbol(self._field.proxy_name, UnresolvedType())
    method = self._halo_exchange_name
    depth_expr = self._psyir_depth_expression()

    # Create infrastructure Calls. For a vector field the relevant
    # component is selected before accessing 'is_dirty'/the exchange
    # method.
    if self.vector_index:
        idx = Literal(str(self.vector_index), INTEGER_TYPE)
        if_condition = Call.create(
            ArrayOfStructuresReference.create(symbol, [idx], ['is_dirty']),
            [('depth', depth_expr)])
        if_body = Call.create(
            ArrayOfStructuresReference.create(
                symbol, [idx.copy()], [method]),
            [('depth', depth_expr.copy())])
    else:
        if_condition = Call.create(
            StructureReference.create(symbol, ['is_dirty']),
            [('depth', depth_expr)])
        if_body = Call.create(
            StructureReference.create(symbol, [method]),
            [('depth', depth_expr.copy())])

    # Add the "if_dirty" check when necessary (i.e. when we cannot
    # statically prove the exchange is required).
    _, known = self.required()
    if not known:
        haloex = IfBlock.create(if_condition, [if_body])
    else:
        haloex = if_body

    self.replace_with(haloex)
    return haloex
4281 
4282 
class LFRicHaloExchangeStart(LFRicHaloExchange):
    '''The start of an asynchronous halo exchange. This is similar to a
    regular halo exchange except that the Fortran name of the call is
    different and the routine only reads the data being transferred
    (the associated field is specified as having a read access). As a
    result this class is not able to determine some important
    properties (such as whether the halo exchange is known to be
    required or not). This is solved by finding the corresponding
    asynchronous halo exchange end (a halo exchange start always has a
    corresponding halo exchange end and vice versa) and calling its
    methods (a halo exchange end is specified as having readwrite
    access to its associated field and therefore is able to determine
    the required properties).

    :param field: the field that this halo exchange will act on
    :type field: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
    :param check_dirty: optional argument (default True) indicating \
        whether this halo exchange should be subject to a run-time check \
        for clean/dirty halos.
    :type check_dirty: bool
    :param vector_index: optional vector index (default None) to \
        identify which component of a vector field this halo exchange is \
        responsible for
    :type vector_index: int
    :param parent: optional PSyIRe parent node (default None) of this \
        object
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    '''
    # Textual description of the node.
    _text_name = "HaloExchangeStart"
    _colour = "yellow"

    def __init__(self, field, check_dirty=True,
                 vector_index=None, parent=None):
        LFRicHaloExchange.__init__(self, field, check_dirty=check_dirty,
                                   vector_index=vector_index, parent=parent)
        # Update the field's access appropriately. Here "gh_read"
        # specifies that the start of a halo exchange only reads
        # the field's data.
        self._field.access = AccessType.READ
        # Override the appropriate parent class name.
        self._halo_exchange_name = "halo_exchange_start"

    def _compute_stencil_type(self):
        '''Call the required method in the corresponding halo exchange end
        object. This is done as the field in halo exchange start is
        only read and the dependence analysis beneath this call
        requires the field to be modified.

        :return: Return the type of stencil required for this pair of \
            halo exchanges
        :rtype: str

        '''
        # pylint: disable=protected-access
        return self._get_hex_end()._compute_stencil_type()

    def _compute_halo_depth(self):
        '''Call the required method in the corresponding halo exchange end
        object. This is done as the field in halo exchange start is
        only read and the dependence analysis beneath this call
        requires the field to be modified.

        :return: Return the halo exchange depth as a Fortran string
        :rtype: str

        '''
        # pylint: disable=protected-access
        return self._get_hex_end()._compute_halo_depth()

    def _psyir_depth_expression(self):
        '''
        Call the required method in the corresponding halo exchange end
        object. This is done as the field in halo exchange start is
        only read and the dependence analysis beneath this call
        requires the field to be modified.

        :returns: the PSyIR expression to compute the halo depth.
        :rtype: :py:class:`psyclone.psyir.nodes.Node`

        '''
        # pylint: disable=protected-access
        return self._get_hex_end()._psyir_depth_expression()

    def required(self):
        '''Call the required method in the corresponding halo exchange end
        object. This is done as the field in halo exchange start is
        only read and the dependence analysis beneath this call
        requires the field to be modified.

        :returns: (x, y) where x specifies whether this halo exchange \
            is (or might be) required - True, or is not required \
            - False. If the first tuple item is True then the second \
            argument specifies whether we definitely know that we need \
            the HaloExchange - True, or are not sure - False.
        :rtype: (bool, bool)

        '''
        return self._get_hex_end().required()

    def _get_hex_end(self):
        '''An internal helper routine for this class which finds the halo
        exchange end object corresponding to this halo exchange start
        object or raises an exception if one is not found.

        :return: The corresponding halo exchange end object
        :rtype: :py:class:`psyclone.dynamo0p3.LFRicHaloExchangeEnd`
        :raises GenerationError: If no matching HaloExchangeEnd is \
            found, or if the first matching haloexchange that is found is \
            not a HaloExchangeEnd

        '''
        # Look at all nodes following this one in schedule order
        # (which is PSyIRe node order).
        for node in self.following():
            if self.sameParent(node) and isinstance(node, LFRicHaloExchange):
                # Found a following `haloexchange`,
                # `haloexchangestart` or `haloexchangeend` PSyIRe node
                # that is at the same calling hierarchy level as this
                # haloexchangestart.
                access = DataAccess(self.field)
                if access.overlaps(node.field):
                    if isinstance(node, LFRicHaloExchangeEnd):
                        return node
                    raise GenerationError(
                        f"Halo exchange start for field '{self.field.name}' "
                        f"should match with a halo exchange end, but found "
                        f"{type(node)}")
        # No match has been found which is an error as a halo exchange
        # start should always have a matching halo exchange end that
        # follows it in schedule (PSyIRe sibling) order.
        raise GenerationError(
            f"Halo exchange start for field '{self.field.name}' has no "
            f"matching halo exchange end")
4416 
4417 
class LFRicHaloExchangeEnd(LFRicHaloExchange):
    '''The end of an asynchronous halo exchange. This is similar to a
    regular halo exchange except that the Fortran name of the call is
    different and the routine only writes to the data being
    transferred.

    :param field: the field that this halo exchange will act on
    :type field: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
    :param check_dirty: optional argument (default True) indicating \
        whether this halo exchange should be subject to a run-time check \
        for clean/dirty halos.
    :type check_dirty: bool
    :param vector_index: optional vector index (default None) to \
        identify which index of a vector field this halo exchange is \
        responsible for
    :type vector_index: int
    :param parent: optional PSyIRe parent node (default None) of this \
        object
    :type parent: :py:class:`psyclone.psyir.nodes.Node`

    '''
    # Textual description of the node.
    _text_name = "HaloExchangeEnd"
    _colour = "yellow"

    def __init__(self, field, check_dirty=True,
                 vector_index=None, parent=None):
        LFRicHaloExchange.__init__(self, field, check_dirty=check_dirty,
                                   vector_index=vector_index, parent=parent)
        # Update field properties appropriately. The associated field is
        # written to. However, a readwrite field access needs to be
        # specified as this is required for the halo exchange logic to
        # work correctly.
        self._field.access = AccessType.READWRITE
        # Override the appropriate parent class name.
        self._halo_exchange_name = "halo_exchange_finish"
4454 
4455 
class HaloDepth():
    '''Determines how much of the halo a read to a field accesses (the
    halo depth).

    :param sym_table: the symbol table of the enclosing InvokeSchedule.
    :type sym_table: :py:class:`psyclone.psyir.symbols.SymbolTable`

    '''
    def __init__(self, sym_table):
        # literal_depth is used to store any known (literal) component
        # of the depth of halo that is accessed. It may not be the
        # full depth as there may also be an additional var_depth
        # specified.
        self._literal_depth = 0
        # var_depth is used to store any variable component of the
        # depth of halo that is accessed. It may not be the full depth
        # as there may also be an additional literal_depth specified.
        self._var_depth = None
        # max_depth specifies whether the full depth of halo (whatever
        # that might be) is accessed. If this is set then
        # literal_depth, var_depth and max_depth_m1 have no
        # meaning. max_depth being False does not necessarily mean the
        # full halo depth is not accessed, rather it means that we do
        # not know.
        self._max_depth = False
        # max_depth_m1 specifies whether the full depth of halo
        # (whatever that might be) apart from the outermost level is
        # accessed. If this is set then literal_depth, var_depth and
        # max_depth have no meaning.
        self._max_depth_m1 = False
        # annexed only is True if the only access in the halo is for
        # annexed dofs
        self._annexed_only = False
        # Keep a reference to the symbol table so that we can look-up
        # variables holding the maximum halo depth.
        # FIXME #2503: This can become invalid if the HaloExchange
        # containing this HaloDepth changes its ancestors.
        self._symbol_table = sym_table

    @property
    def annexed_only(self):
        '''
        :returns: True if only annexed dofs are accessed in the halo and \
            False otherwise.
        :rtype: bool
        '''
        return self._annexed_only

    @property
    def max_depth(self):
        '''
        :returns: True if the read to the field is known to access all \
            of the halo and False otherwise.
        :rtype: bool
        '''
        return self._max_depth

    @property
    def max_depth_m1(self):
        '''Returns whether the read to the field is known to access all of the
        halo except the outermost level or not.

        :returns: True if the read to the field is known to access all \
            of the halo except the outermost and False otherwise.
        :rtype: bool

        '''
        return self._max_depth_m1

    @property
    def var_depth(self):
        '''Returns the name of the variable specifying the depth of halo
        access if one is provided. Note, a variable will only be provided for
        stencil accesses. Also note, this depth should be added to the
        literal_depth to find the total depth.

        :returns: a variable name specifying the halo access depth \
            if one exists, and None if not.
        :rtype: str

        '''
        return self._var_depth

    @property
    def literal_depth(self):
        '''Returns the known fixed (literal) depth of halo access. Note, this
        depth should be added to the var_depth to find the total depth.

        :returns: the known fixed (literal) halo access depth.
        :rtype: int

        '''
        return self._literal_depth

    @literal_depth.setter
    def literal_depth(self, value):
        ''' Set the known fixed (literal) depth of halo access.

        :param int value: Set the known fixed (literal) halo access depth.

        '''
        self._literal_depth = value

    def set_by_value(self, max_depth, var_depth, literal_depth, annexed_only,
                     max_depth_m1):
        # pylint: disable=too-many-arguments
        '''Set halo depth information directly.

        :param bool max_depth: True if the field accesses all of the \
            halo and False otherwise
        :param str var_depth: A variable name specifying the halo \
            access depth, if one exists, and None if not
        :param int literal_depth: The known fixed (literal) halo \
            access depth
        :param bool annexed_only: True if only the halo's annexed dofs \
            are accessed and False otherwise
        :param bool max_depth_m1: True if the field accesses all of \
            the halo but does not require the outermost halo to be correct \
            and False otherwise

        '''
        self._max_depth = max_depth
        self._var_depth = var_depth
        self._literal_depth = literal_depth
        self._annexed_only = annexed_only
        self._max_depth_m1 = max_depth_m1

    def __str__(self):
        '''return the depth of a halo dependency
        as a string'''
        depth_str = ""
        if self.max_depth:
            max_depth = self._symbol_table.lookup_with_tag(
                "max_halo_depth_mesh")
            depth_str += max_depth.name
        elif self.max_depth_m1:
            max_depth = self._symbol_table.lookup_with_tag(
                "max_halo_depth_mesh")
            depth_str += f"{max_depth.name}-1"
        else:
            if self.var_depth:
                depth_str += self.var_depth
                if self.literal_depth:
                    # Ignores depth == 0
                    depth_str += f"+{self.literal_depth}"
            elif self.literal_depth is not None:
                # Returns depth if depth has any value, including 0
                depth_str = str(self.literal_depth)
        return depth_str

    def psyir_expression(self):
        '''
        :returns: the PSyIR expression representing this HaloDepth.
        :rtype: :py:class:`psyclone.psyir.nodes.Node`

        '''
        if self.max_depth:
            max_depth = self._symbol_table.lookup_with_tag(
                "max_halo_depth_mesh")
            return Reference(max_depth)
        if self.max_depth_m1:
            max_depth = self._symbol_table.lookup_with_tag(
                "max_halo_depth_mesh")
            return BinaryOperation.create(
                BinaryOperation.Operator.SUB,
                Reference(max_depth),
                Literal('1', INTEGER_TYPE))
        if self.var_depth:
            depth_ref = Reference(self._symbol_table.lookup(self.var_depth))
            if self.literal_depth != 0:  # Ignores depth == 0
                return BinaryOperation.create(
                    BinaryOperation.Operator.ADD,
                    depth_ref,
                    Literal(f"{self.literal_depth}", INTEGER_TYPE))
            return depth_ref
        # Returns depth if depth has any value, including 0
        return Literal(str(self.literal_depth), INTEGER_TYPE)
4632 
4633 
def halo_check_arg(field, access_types):
    '''
    Support function which performs checks to ensure the first argument
    is a field, that the field is contained within Kernel or Builtin
    call and that the field is accessed in one of the ways specified
    by the second argument. If no error is reported it returns the
    call object containing this argument.

    :param field: the argument object we are checking
    :type field: :py:class:`psyclone.dynamo0p3.DynArgument`
    :param access_types: List of allowed access types.
    :type access_types: List of :py:class:`psyclone.psyGen.AccessType`.
    :return: the call containing the argument object
    :rtype: sub-class of :py:class:`psyclone.psyGen.Kern`

    :raises GenerationError: if the first argument to this function is \
        the wrong type.
    :raises GenerationError: if the first argument is not accessed in one of \
        the ways specified by the second argument to the function.
    :raises GenerationError: if the first argument is not contained \
        within a call object.

    '''
    # The kernel/built-in call associated with this field; a missing
    # 'call' attribute means the caller passed the wrong type of object.
    try:
        call = field.call
    except AttributeError as err:
        raise GenerationError(
            f"HaloInfo class expects an argument of type DynArgument, or "
            f"equivalent, on initialisation, but found, "
            f"'{type(field)}'") from err

    # The field must be accessed in one of the permitted ways ...
    if field.access not in access_types:
        api_strings = [access.api_specific_name() for access in access_types]
        raise GenerationError(
            f"In HaloInfo class, field '{field.name}' should be one of "
            f"{api_strings}, but found '{field.access.api_specific_name()}'")
    # ... and must belong to a kernel or built-in call.
    if not isinstance(call, (LFRicBuiltIn, LFRicKern)):
        raise GenerationError(
            f"In HaloInfo class, field '{field.name}' should be from a call "
            f"but found {type(call)}")
    return call
4676 
4677 
class HaloWriteAccess(HaloDepth):
    '''Determines how much of a field's halo is written to (the halo depth)
    when a field is accessed in a particular kernel within a
    particular loop nest.

    :param field: the field that we are concerned with.
    :type field: :py:class:`psyclone.dynamo0p3.DynArgument`
    :param sym_table: the symbol table associated with the scoping region \
        that contains this halo access.
    :type sym_table: :py:class:`psyclone.psyir.symbols.SymbolTable`

    '''
    def __init__(self, field, sym_table):
        HaloDepth.__init__(self, sym_table)
        # Derive the write-depth information from the supplied field.
        self._compute_from_field(field)

    @property
    def dirty_outer(self):
        '''Returns True if the writer is continuous and accesses the halo and
        False otherwise. It indicates that the outer level of halo that has
        been written to is actually dirty (well to be precise it is a partial
        sum).

        :returns: True if the outer layer of halo that is written \
            to remains dirty and False otherwise.
        :rtype: bool

        '''
        return self._dirty_outer
4707 
4708  def _compute_from_field(self, field):
4709  '''Internal method to compute what parts of a field's halo are written
4710  to in a certain kernel and loop. The information computed is
4711  the depth of access and validity of the data after
4712  writing. The depth of access can be the maximum halo depth or
4713  a literal depth and the outer halo layer that is written to
4714  may be dirty or clean.
4715 
4716  :param field: the field that we are concerned with.
4717  :type field: :py:class:`psyclone.dynamo0p3.DynArgument`
4718 
4719  '''
4720  const = LFRicConstants()
4721 
4722  call = halo_check_arg(field, AccessType.all_write_accesses())
4723  # no test required here as all calls exist within a loop
4724 
4725  loop = call.parent.parent
4726  # The outermost halo level that is written to is dirty if it
4727  # is a continuous field which writes into the halo in a loop
4728  # over cells
4729  self._dirty_outer_dirty_outer = (
4730  not field.discontinuous and
4731  loop.iteration_space == "cell_column" and
4732  loop.upper_bound_name in const.HALO_ACCESS_LOOP_BOUNDS)
4733  depth = 0
4734  max_depth = False
4735  if loop.upper_bound_name in const.HALO_ACCESS_LOOP_BOUNDS:
4736  # loop does redundant computation
4737  if loop.upper_bound_halo_depth:
4738  # loop redundant computation is to a fixed literal depth
4739  depth = loop.upper_bound_halo_depth
4740  else:
4741  # loop redundant computation is to the maximum depth
4742  max_depth = True
4743  # If this is an inter-grid kernel and we're writing to the
4744  # field on the fine mesh then the halo depth is effectively
4745  # doubled
4746  if call.is_intergrid and field.mesh == "gh_fine":
4747  depth *= 2
4748  # The third argument for set_by_value specifies the name of a
4749  # variable used to specify the depth. Variables are currently
4750  # not used when a halo is written to, so we pass None which
4751  # indicates there is no variable. the fifth argument for
4752  # set_by_value indicates whether we only access
4753  # annexed_dofs. At the moment this is not possible when
4754  # modifying a field so we always return False. The sixth
4755  # argument indicates if the depth of access is the
4756  # maximum-1. This is not possible here so we return False.
4757  HaloDepth.set_by_value(self, max_depth, None, depth, False, False)
4758 
4759 
class HaloReadAccess(HaloDepth):
    '''Determines how much of a field's halo is read (the halo depth) and
    additionally the access pattern (the stencil) when a field is
    accessed in a particular kernel within a particular loop nest.

    :param field: the field for which we want information.
    :type field: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
    :param sym_table: the symbol table associated with the scoping region \
        that contains this halo access.
    :type sym_table: :py:class:`psyclone.psyir.symbols.SymbolTable`

    '''
    def __init__(self, field, sym_table):
        HaloDepth.__init__(self, sym_table)
        self._stencil_type = None
        self._needs_clean_outer = None
        self._compute_from_field(field)

    @property
    def needs_clean_outer(self):
        '''Returns False if the reader has a gh_inc access and accesses the
        halo. Otherwise returns True. Indicates that the outer level
        of halo that has been read does not need to be clean (although
        any annexed dofs do).

        :return: Returns False if the outer layer of halo that is read \
            does not need to be clean and True otherwise.
        :rtype: bool

        '''
        return self._needs_clean_outer

    @property
    def stencil_type(self):
        '''Returns the type of stencil access used by the field(s) in the halo
        if one exists. If redundant computation (accessing the full
        halo) is combined with a stencil access (potentially accessing
        a subset of the halo) then the access is assumed to be full
        access (region) for all depths.

        :returns: the type of stencil access used or None if there is no \
            stencil.
        :rtype: str

        '''
        return self._stencil_type

    def _compute_from_field(self, field):
        '''Internal method to compute which parts of a field's halo are read
        in a certain kernel and loop. The information computed is the
        depth of access and the access pattern. The depth of access
        can be the maximum halo depth, a variable specifying the depth
        and/or a literal depth. The access pattern will only be
        specified if the kernel code performs a stencil access on the
        field.

        :param field: the field that we are concerned with.
        :type field: :py:class:`psyclone.dynamo0p3.DynArgument`

        '''
        # pylint: disable=too-many-branches
        const = LFRicConstants()

        self._annexed_only = False
        call = halo_check_arg(field, AccessType.all_read_accesses())

        loop = call.ancestor(LFRicLoop)

        # For GH_INC we accumulate contributions into the field being
        # modified. In order to get correct results for owned and
        # annexed dofs, this requires that the fields we are
        # accumulating contributions from have up-to-date values in
        # the halo cell(s). However, we do not need to be concerned
        # with the values of the modified field in the last-level of
        # the halo. This is because we only have enough information to
        # partially compute the contributions in those cells
        # anyway. (If the values of the field being modified are
        # required, at some later point, in that level of the halo
        # then we do a halo swap.)
        self._needs_clean_outer = (
            not (field.access == AccessType.INC
                 and loop.upper_bound_name in ["cell_halo",
                                               "colour_halo"]))
        # Now we have the parent loop we can work out what part of the
        # halo this field accesses.
        if loop.upper_bound_name in const.HALO_ACCESS_LOOP_BOUNDS:
            # This loop performs redundant computation.
            if loop.upper_bound_halo_depth:
                # Loop redundant computation is to a fixed literal depth.
                self._literal_depth = loop.upper_bound_halo_depth
            else:
                # Loop redundant computation is to the maximum depth.
                self._max_depth = True
        elif loop.upper_bound_name == "ncolour":
            # Loop is coloured but does not access the halo.
            pass
        elif loop.upper_bound_name in ["ncells", "nannexed"]:
            if field.descriptor.stencil:
                # No need to worry about annexed dofs (if they exist)
                # as the stencil will cover these (this is currently
                # guaranteed as halo exchanges only exchange full
                # halos).
                pass
            else:  # there is no stencil
                if (field.discontinuous or call.iterates_over == "dof" or
                        call.all_updates_are_writes):
                    # There are only local accesses or the kernel is of the
                    # special form where any iteration is guaranteed to write
                    # the same value to a given shared entity.
                    pass
                else:
                    # This is a continuous field which therefore
                    # accesses annexed dofs. We set access to the
                    # level 1 halo here as there is currently no
                    # mechanism to perform a halo exchange solely on
                    # annexed dofs.
                    self._literal_depth = 1
                    self._annexed_only = True
        elif loop.upper_bound_name == "ndofs":
            # We only access owned dofs so there is no access to the
            # halo.
            pass
        else:
            raise GenerationError(
                f"Internal error in HaloReadAccess._compute_from_field. Found "
                f"unexpected loop upper bound name '{loop.upper_bound_name}'")

        if self._max_depth or self._var_depth or self._literal_depth:
            # Whilst stencil type has no real meaning when there is no
            # stencil it is convenient to set it to "region" when
            # there is redundant computation as the halo exchange
            # logic is interested in the access pattern irrespective
            # of whether there is a stencil access or not. We use
            # "region" as it means access all of the halo data which
            # is what is done when performing redundant computation
            # with no stencil.
            self._stencil_type = "region"
        if field.descriptor.stencil:
            # The field has a stencil access.
            if self._max_depth:
                raise GenerationError(
                    "redundant computation to max depth with a stencil is "
                    "invalid")
            self._stencil_type = field.descriptor.stencil['type']
            if self._literal_depth:
                # Halo exchange does not support mixed accesses to the halo.
                self._stencil_type = "region"
            stencil_depth = field.descriptor.stencil['extent']
            if stencil_depth:
                # stencil_depth is provided in the kernel metadata.
                self._literal_depth += stencil_depth
            else:
                # Stencil_depth is provided by the algorithm layer.
                # It is currently not possible to specify kind for an
                # integer literal stencil depth in a kernel call. This
                # will be enabled when addressing issue #753.
                if field.stencil.extent_arg.is_literal():
                    # A literal is specified.
                    value_str = field.stencil.extent_arg.text
                    self._literal_depth += int(value_str)
                else:
                    # A variable is specified.
                    self._var_depth = field.stencil.extent_arg.varname
        # If this is an intergrid kernel and the field in question is on
        # the fine mesh then we must double the halo depth.
        if call.is_intergrid and field.mesh == "gh_fine":
            if self._literal_depth:
                self._literal_depth *= 2
            if self._var_depth:
                self._var_depth = "2*" + self._var_depth
4930 
4931 
class FSDescriptor:
    ''' Provides information about a particular function space used by
    a meta-funcs entry in the kernel metadata.

    :param descriptor: the kernel metadata entry for this function space.
    '''

    def __init__(self, descriptor):
        # The original metadata descriptor object that we wrap.
        self._descriptor = descriptor

    @property
    def requires_basis(self):
        ''' Returns True if a basis function is associated with this
        function space, otherwise it returns False. '''
        return "gh_basis" in self._descriptor.operator_names

    @property
    def requires_diff_basis(self):
        ''' Returns True if a differential basis function is
        associated with this function space, otherwise it returns
        False. '''
        return "gh_diff_basis" in self._descriptor.operator_names

    @property
    def fs_name(self):
        ''' Returns the raw metadata value of this function space. '''
        return self._descriptor.function_space_name
4956 
4957 
class FSDescriptors:
    ''' Contains a collection of FSDescriptor objects and methods
    that provide information across these objects. We have one
    FSDescriptor for each meta-funcs entry in the kernel
    metadata.
    # TODO #274 this should actually be named something like
    BasisFuncDescriptors as it holds information describing the
    basis/diff-basis functions required by a kernel.

    :param descriptors: list of objects describing the basis/diff-basis \
        functions required by a kernel, as obtained from metadata.
    :type descriptors: list of :py:class:`psyclone.DynFuncDescriptor03`.

    '''
    def __init__(self, descriptors):
        # Keep the raw metadata descriptors and wrap each one in an
        # FSDescriptor for convenient querying.
        self._orig_descriptors = descriptors
        self._descriptors = []
        for descriptor in descriptors:
            self._descriptors.append(FSDescriptor(descriptor))

    def exists(self, fspace):
        ''' Return True if a descriptor with the specified function
        space exists, otherwise return False. '''
        for descriptor in self._descriptors:
            # FS descriptors hold information taken from the kernel
            # metadata and therefore it is the original name of
            # the supplied function space that we must look at.
            if descriptor.fs_name == fspace.orig_name:
                return True
        return False

    def get_descriptor(self, fspace):
        ''' Return the descriptor with the specified function space
        name. If it does not exist raise an error.'''
        for descriptor in self._descriptors:
            if descriptor.fs_name == fspace.orig_name:
                return descriptor
        raise GenerationError(
            f"FSDescriptors:get_descriptor: there is no descriptor for "
            f"function space {fspace.orig_name}")

    @property
    def descriptors(self):
        '''
        :return: the list of Descriptors, one for each of the meta-funcs
            entries in the kernel metadata.
        :rtype: List of :py:class:`psyclone.dynamo0p3.FSDescriptor`
        '''
        return self._descriptors
5008 
5009 
def check_args(call):
    '''
    Checks that the kernel arguments provided via the invoke call are
    consistent with the information expected, as specified by the
    kernel metadata.

    :param call: the object produced by the parser that describes the
        kernel call to be checked.
    :type call: :py:class:`psyclone.parse.algorithm.KernelCall`

    :raises GenerationError: if the kernel arguments in the Algorithm layer
        do not match up with the kernel metadata.
    '''
    # Stencil arguments: each stencil access adds extra algorithm-layer
    # arguments beyond those listed in the kernel metadata.
    stencil_arg_count = 0
    for arg_descriptor in call.ktype.arg_descriptors:
        if arg_descriptor.stencil:
            if not arg_descriptor.stencil['extent']:
                # An extent argument must be provided.
                stencil_arg_count += 1
            if arg_descriptor.stencil['type'] == 'xory1d':
                # A direction argument must be provided.
                stencil_arg_count += 1

    const = LFRicConstants()
    # Quadrature arguments - will have as many as there are distinct
    # quadrature shapes specified in the metadata.
    qr_arg_count = len(set(call.ktype.eval_shapes).intersection(
        set(const.VALID_QUADRATURE_SHAPES)))

    expected_arg_count = len(call.ktype.arg_descriptors) + \
        stencil_arg_count + qr_arg_count

    if expected_arg_count != len(call.args):
        raise GenerationError(
            f"error: expected '{expected_arg_count}' arguments in the "
            f"algorithm layer but found '{len(call.args)}'. Expected "
            f"'{len(call.ktype.arg_descriptors)}' standard arguments, "
            f"'{stencil_arg_count}' stencil arguments and '{qr_arg_count}' "
            f"qr_arguments'")
5049 
5050 
@dataclass(frozen=True)
class LFRicArgStencil:
    '''
    Provides stencil information about an LFRic kernel argument.
    LFRicArgStencil can provide the extent, algorithm argument for the extent,
    and the direction argument of a stencil or set any of these properties.

    :param name: the name of the stencil.
    :param extent: the extent of the stencil if it is known. It will
        be known if it is specified in the metadata.
    :param extent_arg: the algorithm argument associated with the extent
        value if extent was not found in the metadata.
    :param direction_arg: the direction argument associated with the
        direction of the stencil if the direction of the
        stencil is not known.
    '''
    name: str
    extent: str = None
    extent_arg: Any = None
    direction_arg: Any = None
5071 
5072 
class DynKernelArguments(Arguments):
    '''
    Provides information about Dynamo kernel call arguments
    collectively, as specified by the kernel argument metadata.

    :param call: the kernel metadata for which to extract argument info.
    :type call: :py:class:`psyclone.parse.KernelCall`
    :param parent_call: the kernel-call object.
    :type parent_call: :py:class:`psyclone.domain.lfric.LFRicKern`
    :param bool check: whether to check for consistency between the \
        kernel metadata and the algorithm layer. Defaults to True.

    :raises GenerationError: if the kernel metadata specifies stencil extent.
    '''
    def __init__(self, call, parent_call, check=True):
        # pylint: disable=too-many-branches
        if False:  # pylint: disable=using-constant-test
            # For pyreverse
            self._0_to_n = DynKernelArgument(None, None, None, None)

        Arguments.__init__(self, parent_call)

        # Check that the arguments provided by the algorithm layer are
        # consistent with those expected by the kernel(s).
        check_args(call)

        # Create our arguments and add in stencil information where
        # appropriate.
        self._args = []
        idx = 0
        for arg in call.ktype.arg_descriptors:
            dyn_argument = DynKernelArgument(self, arg, call.args[idx],
                                             parent_call, check)
            idx += 1
            if dyn_argument.descriptor.stencil:
                if dyn_argument.descriptor.stencil['extent']:
                    raise GenerationError("extent metadata not yet supported")
                    # if supported we would add the following
                    # line: stencil.extent =
                    # dyn_argument.descriptor.stencil['extent']
                # An extent argument has been added.
                stencil_extent_arg = call.args[idx]
                idx += 1
                if dyn_argument.descriptor.stencil['type'] == 'xory1d':
                    # A direction argument has been added.
                    stencil = LFRicArgStencil(
                        name=dyn_argument.descriptor.stencil['type'],
                        extent_arg=stencil_extent_arg,
                        direction_arg=call.args[idx]
                        )
                    idx += 1
                else:
                    # Create a stencil object and store a reference to it in
                    # our new DynKernelArgument object.
                    stencil = LFRicArgStencil(
                        name=dyn_argument.descriptor.stencil['type'],
                        extent_arg=stencil_extent_arg
                        )
                dyn_argument.stencil = stencil
            self._args.append(dyn_argument)

        # We have now completed the construction of the kernel arguments so
        # we can go back and update the names of any stencil size and/or
        # direction variable names to ensure there are no clashes.
        if self._parent_call:
            inv_sched = self._parent_call.ancestor(InvokeSchedule)
            if hasattr(inv_sched, "symbol_table"):
                symtab = inv_sched.symbol_table
            else:
                # This can happen in stub generation.
                symtab = LFRicSymbolTable()
        else:
            # TODO 719 The symtab is not connected to other parts of the
            # Stub generation.
            symtab = LFRicSymbolTable()
        const = LFRicConstants()
        for arg in self._args:
            if not arg.descriptor.stencil:
                continue
            if not arg.stencil.extent_arg.is_literal():
                if arg.stencil.extent_arg.varname:
                    # Ensure extent argument name is registered in the
                    # symbol_table.
                    tag = "AlgArgs_" + arg.stencil.extent_arg.text
                    root = arg.stencil.extent_arg.varname
                    new_name = symtab.find_or_create_tag(tag, root).name
                    arg.stencil.extent_arg.varname = new_name
            if arg.descriptor.stencil['type'] == 'xory1d':
                # A direction argument has been added.
                if arg.stencil.direction_arg.varname and \
                        arg.stencil.direction_arg.varname not in \
                        const.VALID_STENCIL_DIRECTIONS:
                    # Register the name of the direction argument to ensure
                    # it is unique in the PSy layer.
                    tag = "AlgArgs_" + arg.stencil.direction_arg.text
                    root = arg.stencil.direction_arg.varname
                    new_name = symtab.find_or_create_integer_symbol(
                        root, tag=tag).name
                    arg.stencil.direction_arg.varname = new_name

        self._dofs = []

        # Generate a static list of unique function-space names used
        # by the set of arguments: store the mangled names as these
        # are what we use at the level of an Invoke.
        self._unique_fs_names = []
        # List of corresponding unique function-space objects.
        self._unique_fss = []
        for arg in self._args:
            for function_space in arg.function_spaces:
                # We check that function_space is not None because scalar
                # args don't have one and fields only have one (only
                # operators have two).
                if function_space and \
                        function_space.mangled_name not in \
                        self._unique_fs_names:
                    self._unique_fs_names.append(function_space.mangled_name)
                    self._unique_fss.append(function_space)
5190 
5191  def get_arg_on_space_name(self, func_space_name):
5192  '''
5193  Returns the first argument (field or operator) found that is on
5194  the named function space, as specified in the kernel metadata. Also
5195  returns the associated FunctionSpace object.
5196 
5197  :param str func_space_name: Name of the function space (as specified \
5198  in kernel metadata) for which to \
5199  find an argument.
5200  :return: the first kernel argument that is on the named function \
5201  space and the associated FunctionSpace object.
5202  :rtype: (:py:class:`psyclone.dynamo0p3.DynKernelArgument`,
5203  :py:class:`psyclone.domain.lfric.FunctionSpace`)
5204  :raises: FieldNotFoundError if no field or operator argument is found \
5205  for the named function space.
5206  '''
5207  for arg in self._args_args_args:
5208  for function_space in arg.function_spaces:
5209  if function_space:
5210  if func_space_name == function_space.orig_name:
5211  return arg, function_space
5212  raise FieldNotFoundError(f"DynKernelArguments:get_arg_on_space_name: "
5213  f"there is no field or operator with "
5214  f"function space {func_space_name}")
5215 
5216  def get_arg_on_space(self, func_space):
5217  '''
5218  Returns the first argument (field or operator) found that is on
5219  the specified function space. The mangled name of the supplied
5220  function space is used for comparison.
5221 
5222  :param func_space: The function space for which to find an argument.
5223  :type func_space: :py:class:`psyclone.domain.lfric.FunctionSpace`
5224  :return: the first kernel argument that is on the supplied function
5225  space
5226  :rtype: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
5227  :raises: FieldNotFoundError if no field or operator argument is found
5228  for the specified function space.
5229  '''
5230  for arg in self._args_args_args:
5231  for function_space in arg.function_spaces:
5232  if function_space:
5233  if func_space.mangled_name == function_space.mangled_name:
5234  return arg
5235 
5236  raise FieldNotFoundError(f"DynKernelArguments:get_arg_on_space: there "
5237  f"is no field or operator with function space"
5238  f" {func_space.orig_name} (mangled name = "
5239  f"'{func_space.mangled_name}')")
5240 
5241  def has_operator(self, op_type=None):
5242  ''' Returns true if at least one of the arguments is an operator
5243  of type op_type (either gh_operator [LMA] or gh_columnwise_operator
5244  [CMA]). If op_type is None then searches for *any* valid operator
5245  type. '''
5246  const = LFRicConstants()
5247  if op_type and op_type not in const.VALID_OPERATOR_NAMES:
5248  raise GenerationError(
5249  f"If supplied, 'op_type' must be a valid operator type (one "
5250  f"of {const.VALID_OPERATOR_NAMES}) but got '{op_type}'")
5251  if not op_type:
5252  # If no operator type is specified then we match any type
5253  op_list = const.VALID_OPERATOR_NAMES
5254  else:
5255  op_list = [op_type]
5256  for arg in self._args_args_args:
5257  if arg.argument_type in op_list:
5258  return True
5259  return False
5260 
5261  @property
5262  def unique_fss(self):
5263  ''' Returns a unique list of function space objects used by the
5264  arguments of this kernel '''
5265  return self._unique_fss_unique_fss
5266 
5267  @property
5268  def unique_fs_names(self):
5269  ''' Return the list of unique function space names used by the
5270  arguments of this kernel. The names are unmangled (i.e. as
5271  specified in the kernel metadata) '''
5272  return self._unique_fs_names_unique_fs_names
5273 
5275  '''
5276  Returns an argument we can use to dereference the iteration
5277  space. This can be a field or operator that is modified or
5278  alternatively a field that is read if one or more scalars
5279  are modified. If a kernel writes to more than one argument then
5280  that requiring the largest iteration space is selected.
5281 
5282  :return: Kernel argument from which to obtain iteration space
5283  :rtype: :py:class:`psyclone.dynamo0p3.DynKernelArgument`
5284  '''
5285 
5286  # Since we always compute operators out to the L1 halo we first
5287  # check whether this kernel writes to an operator
5288  write_accesses = AccessType.all_write_accesses()
5289  const = LFRicConstants()
5290  op_args = psyGen.args_filter(
5291  self._args_args_args,
5292  arg_types=const.VALID_OPERATOR_NAMES,
5293  arg_accesses=write_accesses)
5294  if op_args:
5295  return op_args[0]
5296 
5297  # Is this an inter-grid kernel? If so, then the iteration space
5298  # is determined by the coarse mesh, irrespective of whether
5299  # we are prolonging (and thus writing to a field on the fine mesh)
5300  # or restricting.
5301  if self._parent_call_parent_call.is_intergrid:
5302  fld_args = psyGen.args_filter(
5303  self._args_args_args,
5304  arg_types=const.VALID_FIELD_NAMES,
5305  arg_meshes=["gh_coarse"])
5306  return fld_args[0]
5307 
5308  # This is not an inter-grid kernel and it does not write to an
5309  # operator. We now check for fields that are written to. We
5310  # check first for any modified field on a continuous function
5311  # space, failing that we try any_space function spaces
5312  # (because we must assume such a space is continuous) and
5313  # finally we try all discontinuous function spaces including
5314  # any_discontinuous_space. We do this because if a quantity on
5315  # a continuous FS is modified then our iteration space must be
5316  # larger (include L1-halo cells)
5317  const = LFRicConstants()
5318  write_accesses = AccessType.all_write_accesses()
5319  fld_args = psyGen.args_filter(
5320  self._args_args_args,
5321  arg_types=const.VALID_FIELD_NAMES,
5322  arg_accesses=write_accesses)
5323  if fld_args:
5324  for spaces in [const.CONTINUOUS_FUNCTION_SPACES,
5325  const.VALID_ANY_SPACE_NAMES,
5326  const.VALID_DISCONTINUOUS_NAMES]:
5327  for arg in fld_args:
5328  if arg.function_space.orig_name in spaces:
5329  return arg
5330 
5331  # No modified fields or operators. Check for unmodified fields...
5332  fld_args = psyGen.args_filter(
5333  self._args_args_args,
5334  arg_types=const.VALID_FIELD_NAMES)
5335  if fld_args:
5336  return fld_args[0]
5337 
5338  # it is an error if we get to here
5339  raise GenerationError(
5340  "iteration_space_arg(). The dynamo0.3 api must have a modified "
5341  "field, a modified operator, or an unmodified field (in the case "
5342  "of a modified scalar). None of these were found.")
5343 
5344  @property
5345  def dofs(self):
5346  ''' Currently required for Invoke base class although this
5347  makes no sense for Dynamo. Need to refactor the Invoke base class
5348  and remove the need for this property (#279). '''
5349  return self._dofs_dofs
5350 
5352  '''
5353  :returns: the PSyIR expressions representing this Argument list.
5354  :rtype: list of :py:class:`psyclone.psyir.nodes.Node`
5355 
5356  '''
5357  create_arg_list = KernCallArgList(self._parent_call_parent_call)
5358  create_arg_list.generate()
5359  return create_arg_list.psyir_arglist
5360 
5361  @property
5362  def acc_args(self):
5363  '''
5364  :returns: the list of quantities that must be available on an \
5365  OpenACC device before the associated kernel can be launched.
5366  :rtype: list of str
5367 
5368  '''
5369  create_acc_arg_list = KernCallAccArgList(self._parent_call_parent_call)
5370  create_acc_arg_list.generate()
5371  return create_acc_arg_list.arglist
5372 
5373  @property
5374  def scalars(self):
5375  '''
5376  Provides the list of names of scalar arguments required by the
5377  kernel associated with this Arguments object. If there are none
5378  then the returned list is empty.
5379 
5380  :returns: A list of the names of scalar arguments in this object.
5381  :rtype: list of str
5382  '''
5383  # Return nothing for the moment as it is unclear whether
5384  # scalars need to be explicitly dealt with (for OpenACC) in
5385  # the dynamo api.
5386  return []
5387 
5388 
class DynKernelArgument(KernelArgument):
    '''
    This class provides information about individual LFRic kernel call
    arguments as specified by the kernel argument metadata and the
    kernel invocation in the Algorithm layer.

    :param kernel_args: object encapsulating all arguments to the \
        kernel call.
    :type kernel_args: :py:class:`psyclone.dynamo0p3.DynKernelArguments`
    :param arg_meta_data: information obtained from the metadata for \
        this kernel argument.
    :type arg_meta_data: :py:class:`psyclone.domain.lfric.LFRicArgDescriptor`
    :param arg_info: information on how this argument is specified in \
        the Algorithm layer.
    :type arg_info: :py:class:`psyclone.parse.algorithm.Arg`
    :param call: the kernel object with which this argument is associated.
    :type call: :py:class:`psyclone.domain.lfric.LFRicKern`
    :param bool check: whether to check for consistency between the \
        kernel metadata and the algorithm layer. Defaults to True.

    :raises InternalError: for an unsupported metadata in the argument \
        descriptor data type.

    '''
    # pylint: disable=too-many-public-methods, too-many-instance-attributes
    def __init__(self, kernel_args, arg_meta_data, arg_info, call, check=True):
        # Keep a reference to DynKernelArguments object that contains
        # this argument. This permits us to manage name-mangling for
        # any-space function spaces.
        self._kernel_args = kernel_args
        self._vector_size = arg_meta_data.vector_size
        self._argument_type = arg_meta_data.argument_type
        self._stencil = None
        if arg_meta_data.mesh:
            self._mesh = arg_meta_data.mesh.lower()
        else:
            self._mesh = None

        # The list of function-space objects for this argument. Each
        # object can be queried for its original name and for the
        # mangled name (used to make any-space arguments distinct
        # within an invoke). The argument will only have more than
        # one function-space associated with it if it is an operator.
        fs1 = None
        fs2 = None

        if self.is_operator:

            fs1 = FunctionSpace(arg_meta_data.function_space_to,
                                self._kernel_args)
            fs2 = FunctionSpace(arg_meta_data.function_space_from,
                                self._kernel_args)
        else:
            if arg_meta_data.function_space:
                fs1 = FunctionSpace(arg_meta_data.function_space,
                                    self._kernel_args)
        self._function_spaces = [fs1, fs2]

        # Set the argument's intrinsic type from its descriptor's
        # data type and check if an invalid data type is passed from
        # the argument descriptor.
        try:
            const = LFRicConstants()
            self._intrinsic_type = const.MAPPING_DATA_TYPES[
                arg_meta_data.data_type]
        except KeyError as err:
            raise InternalError(
                f"DynKernelArgument.__init__(): Found unsupported data "
                f"type '{arg_meta_data.data_type}' in the kernel argument "
                f"descriptor '{arg_meta_data}'.") from err

        # Addressing issue #753 will allow us to perform static checks
        # for consistency between the algorithm and the kernel
        # metadata. This will include checking that a field on a read
        # only function space is not passed to a kernel that modifies
        # it. Note, issue #79 is also related to this.
        KernelArgument.__init__(self, arg_meta_data, arg_info, call)
        # Argument proxy data type (if/as defined in LFRic infrastructure).
        self._proxy_data_type = None
        # Set up kernel argument information for scalar, field and operator
        # arguments: precision, module name, data type and proxy data type.
        self._init_data_type_properties(arg_info, check)
        # Complete the initialisation of the argument (after
        # _init_data_type_properties() so the precision info etc is
        # already set up).
        self._complete_init(arg_info)
5475 
def ref_name(self, function_space=None):
    '''
    Returns the name used to dereference this type of argument (depends
    on whether it is a field or operator and, if the latter, whether it
    is the to- or from-space that is specified).

    :param function_space: the function space of this argument
    :type function_space: :py:class:`psyclone.domain.lfric.FunctionSpace`

    :returns: the name used to dereference this argument.
    :rtype: str

    :raises GenerationError: if the supplied function space is not one \
        of the function spaces associated with this argument.
    :raises GenerationError: if the supplied function space is not being \
        returned by either 'function_space_from' or 'function_space_to'.
    :raises GenerationError: if the argument type is not supported.

    '''
    # pylint: disable=too-many-branches
    if not function_space:
        if self.is_operator:
            # For an operator we use the 'from' FS
            function_space = self._function_spaces[1]
        else:
            function_space = self._function_spaces[0]
    else:
        # Check that the supplied function space is valid for this
        # argument
        found = False
        for fspace in self.function_spaces:
            if fspace and fspace.orig_name == function_space.orig_name:
                found = True
                break
        if not found:
            raise GenerationError(
                f"DynKernelArgument.ref_name(fs): The supplied function "
                f"space (fs='{function_space.orig_name}') is not one of "
                f"the function spaces associated with this argument "
                f"(fss={self.function_space_names}).")
    if self.is_field:
        return "vspace"
    if self.is_operator:
        if function_space.orig_name == \
                self.descriptor.function_space_from:
            return "fs_from"
        if function_space.orig_name == self.descriptor.function_space_to:
            return "fs_to"
        raise GenerationError(
            f"DynKernelArgument.ref_name(fs): Function space "
            f"'{function_space.orig_name}' is one of the 'gh_operator' "
            f"function spaces '{self.function_spaces}' but is not being "
            f"returned by either function_space_from "
            f"'{self.descriptor.function_space_from}' or "
            f"function_space_to '{self.descriptor.function_space_to}'.")
    raise GenerationError(
        f"DynKernelArgument.ref_name(fs): Found unsupported argument "
        f"type '{self._argument_type}'.")
5535 
def _init_data_type_properties(self, arg_info, check=True):
    '''Set up kernel argument information from LFRicConstants: precision,
    data type, proxy data type and module name. This is currently
    supported for scalar, field and operator arguments.

    :param arg_info: information on how this argument is specified \
        in the Algorithm layer.
    :type arg_info: :py:class:`psyclone.parse.algorithm.Arg`
    :param bool check: whether to use the algorithm \
        information. Optional argument that defaults to True.

    :raises InternalError: if the argument is not a scalar, field or \
        operator.

    '''
    alg_datatype_info = None
    if arg_info:
        # pylint: disable=protected-access
        alg_datatype_info = arg_info._datatype
    alg_datatype = None
    alg_precision = None
    if alg_datatype_info:
        alg_datatype, alg_precision = alg_datatype_info

    const = LFRicConstants()
    if arg_info and arg_info.form == "collection":
        # A field vector is specified in the algorithm layer via a
        # collection type; map it back to the underlying field type.
        try:
            alg_datatype = const.FIELD_VECTOR_TO_FIELD_MAP[alg_datatype]
        except KeyError:
            # The collection datatype is not recognised or supported.
            alg_datatype = None

    # Delegate to the appropriate helper for this category of argument.
    if self.is_scalar:
        self._init_scalar_properties(alg_datatype, alg_precision,
                                     check)
    elif self.is_field:
        self._init_field_properties(alg_datatype, check)
    elif self.is_operator:
        self._init_operator_properties(alg_datatype, check)
    else:
        raise InternalError(
            f"Supported argument types are scalar, field and operator, "
            f"but the argument '{self.name}' in kernel "
            f"'{self._call.name}' is none of these.")
5576 
def _init_scalar_properties(
        self, alg_datatype, alg_precision, check=True):
    '''Set up the properties of this scalar using algorithm datatype
    information if it is available.

    :param alg_datatype: the datatype of this argument as \
        specified in the algorithm layer or None if it is not known.
    :type alg_datatype: str or NoneType
    :param alg_precision: the precision of this argument as \
        specified in the algorithm layer or None if it is not known.
    :type alg_precision: str or NoneType
    :param bool check: whether to use the algorithm \
        information. Optional argument that defaults to True.

    :raises InternalError: if the intrinsic type of the scalar is \
        not supported.
    :raises GenerationError: if the datatype specified in the \
        algorithm layer is inconsistent with the kernel metadata.
    :raises GenerationError: if the datatype for a gh_scalar \
        could not be found in the algorithm layer.
    :raises NotImplementedError: if the scalar is a reduction and \
        its intrinsic type is not real.
    :raises GenerationError: if the scalar is a reduction and is \
        not declared with default precision.

    '''
    const = LFRicConstants()
    # Check the type of scalar defined in the metadata is supported.
    if self.intrinsic_type not in const.VALID_INTRINSIC_TYPES:
        raise InternalError(
            f"Expected one of {const.VALID_INTRINSIC_TYPES} intrinsic "
            f"types for a scalar argument but found "
            f"'{self.intrinsic_type}' in the metadata of kernel "
            f"{self._call.name} for argument {self.name}.")

    # Check the metadata and algorithm types are consistent if
    # the algorithm information is available and is not being ignored.
    if check and alg_datatype and \
            alg_datatype != self.intrinsic_type:
        raise GenerationError(
            f"The kernel metadata for argument '{self.name}' in "
            f"kernel '{self._call.name}' specifies this argument "
            f"should be a scalar of type '{self.intrinsic_type}' but "
            f"in the algorithm layer it is defined as a "
            f"'{alg_datatype}'.")

    # If the algorithm information is not being ignored and
    # the datatype is known in the algorithm layer and it is
    # not a literal then its precision should also be defined.
    if check and alg_datatype and not alg_precision and \
            not self.is_literal:
        raise GenerationError(
            f"LFRic coding standards require scalars to have "
            f"their precision defined in the algorithm layer but "
            f"'{self.name}' in '{self._call.name}' does not.")

    if self.access in AccessType.get_valid_reduction_modes():
        # Treat reductions separately to other scalars as it
        # is expected that they should match the precision of
        # the field they are reducing. At the moment there is
        # an assumption that the precision will always be a
        # particular value (the default), see issue #1570.

        # Only real reductions are supported.
        if not self.intrinsic_type == "real":
            raise NotImplementedError(
                "Reductions for datatypes other than real are not yet "
                "supported in PSyclone.")

        expected_precision = const.DATA_TYPE_MAP["reduction"]["kind"]
        # If the algorithm information is not being ignored
        # then check that the expected precision and the
        # precision defined in the algorithm layer are
        # the same.
        if check and alg_precision and \
                alg_precision != expected_precision:
            raise GenerationError(
                f"This scalar is a reduction which assumes precision "
                f"of type '{expected_precision}' but the algorithm "
                f"declares this scalar with precision "
                f"'{alg_precision}'.")

        # Use the default 'real' scalar reduction properties.
        self._precision = expected_precision
        self._data_type = const.DATA_TYPE_MAP["reduction"]["type"]
        self._proxy_data_type = const.DATA_TYPE_MAP[
            "reduction"]["proxy_type"]
        self._module_name = const.DATA_TYPE_MAP["reduction"]["module"]
    else:
        # This is a scalar that is not part of a reduction.

        if check and alg_precision:
            # Use the algorithm precision if it is available
            # and not being ignored.
            self._precision = alg_precision
        else:
            # Use default precision for this datatype if the
            # algorithm precision is either not available or is
            # being ignored.
            self._precision = const.SCALAR_PRECISION_MAP[
                self.intrinsic_type]
5680 
def _init_field_properties(self, alg_datatype, check=True):
    '''Set up the properties of this field using algorithm datatype
    information if it is available.

    :param alg_datatype: the datatype of this argument as \
        specified in the algorithm layer or None if it is not known.
    :type alg_datatype: str or NoneType
    :param bool check: whether to use the algorithm \
        information. Optional argument that defaults to True.

    :raises GenerationError: if the datatype for a gh_field \
        could not be found in the algorithm layer.
    :raises GenerationError: if the datatype specified in the \
        algorithm layer is inconsistent with the kernel metadata.
    :raises InternalError: if the intrinsic type of the field is \
        not supported (i.e. is not real or integer).

    '''
    const = LFRicConstants()
    argtype = None
    # If the algorithm information is not being ignored then
    # it must be available.
    if check and not alg_datatype:
        raise GenerationError(
            f"It was not possible to determine the field type from "
            f"the algorithm layer for argument '{self.name}' in "
            f"kernel '{self._call.name}'.")

    # If the algorithm information is not being ignored then
    # check the metadata and algorithm type are consistent and
    # that the metadata specifies a supported intrinsic type.
    if self.intrinsic_type == "real":
        if not check:
            # Use the default as we are ignoring any algorithm info
            argtype = "field"
        elif alg_datatype == "field_type":
            argtype = "field"
        elif alg_datatype == "r_bl_field_type":
            argtype = "r_bl_field"
        elif alg_datatype == "r_phys_field_type":
            argtype = "r_phys_field"
        elif alg_datatype == "r_solver_field_type":
            argtype = "r_solver_field"
        elif alg_datatype == "r_tran_field_type":
            argtype = "r_tran_field"
        else:
            raise GenerationError(
                f"The metadata for argument '{self.name}' in kernel "
                f"'{self._call.name}' specifies that this is a real "
                f"field, however it is declared as a "
                f"'{alg_datatype}' in the algorithm code.")

    elif self.intrinsic_type == "integer":
        if check and alg_datatype != "integer_field_type":
            raise GenerationError(
                f"The metadata for argument '{self.name}' in kernel "
                f"'{self._call.name}' specifies that this is an "
                f"integer field, however it is declared as a "
                f"'{alg_datatype}' in the algorithm code.")
        argtype = "integer_field"
    else:
        raise InternalError(
            f"Expected one of {const.VALID_FIELD_INTRINSIC_TYPES} "
            f"intrinsic types for a field argument but found "
            f"'{self.intrinsic_type}'.")
    # Look up the concrete LFRic type information for the resolved
    # field flavour.
    self._data_type = const.DATA_TYPE_MAP[argtype]["type"]
    self._precision = const.DATA_TYPE_MAP[argtype]["kind"]
    self._proxy_data_type = const.DATA_TYPE_MAP[argtype]["proxy_type"]
    self._module_name = const.DATA_TYPE_MAP[argtype]["module"]
5751 
def _init_operator_properties(self, alg_datatype, check=True):
    '''Set up the properties of this operator using algorithm datatype
    information if it is available.

    :param alg_datatype: the datatype of this argument as \
        specified in the algorithm layer or None if it is not known.
    :type alg_datatype: str or NoneType
    :param bool check: whether to use the algorithm \
        information. Optional argument that defaults to True.

    :raises GenerationError: if the datatype for a gh_operator \
        could not be found in the algorithm layer (and check is True).
    :raises GenerationError: if the datatype specified in the \
        algorithm layer is inconsistent with the kernel metadata.
    :raises InternalError: if this argument is not an operator.

    '''
    const = LFRicConstants()
    argtype = None
    if self.argument_type == "gh_operator":
        if not check:
            # Use the default as we are ignoring any algorithm info
            argtype = "operator"
        elif not alg_datatype:
            # Raise an exception as we require algorithm
            # information to determine the precision of the
            # operator
            raise GenerationError(
                f"It was not possible to determine the operator type "
                f"from the algorithm layer for argument '{self.name}' "
                f"in kernel '{self._call.name}'.")
        elif alg_datatype == "operator_type":
            argtype = "operator"
        elif alg_datatype == "r_solver_operator_type":
            argtype = "r_solver_operator"
        elif alg_datatype == "r_tran_operator_type":
            argtype = "r_tran_operator"
        else:
            raise GenerationError(
                f"The metadata for argument '{self.name}' in kernel "
                f"'{self._call.name}' specifies that this is an "
                f"operator, however it is declared as a "
                f"'{alg_datatype}' in the algorithm code.")
    elif self.argument_type == "gh_columnwise_operator":
        if check and alg_datatype and \
                alg_datatype != "columnwise_operator_type":
            raise GenerationError(
                f"The metadata for argument '{self.name}' in kernel "
                f"'{self._call.name}' specifies that this is a "
                f"columnwise operator, however it is declared as a "
                f"'{alg_datatype}' in the algorithm code.")
        argtype = "columnwise_operator"
    else:
        raise InternalError(
            f"Expected 'gh_operator' or 'gh_columnwise_operator' "
            f"argument type but found '{self.argument_type}'.")
    # Look up the concrete LFRic type information for the resolved
    # operator flavour.
    self._data_type = const.DATA_TYPE_MAP[argtype]["type"]
    self._precision = const.DATA_TYPE_MAP[argtype]["kind"]
    self._proxy_data_type = const.DATA_TYPE_MAP[argtype]["proxy_type"]
    self._module_name = const.DATA_TYPE_MAP[argtype]["module"]
5813 
@property
def is_scalar(self):
    '''
    :returns: True if this kernel argument represents a scalar, \
        False otherwise.
    :rtype: bool
    '''
    const = LFRicConstants()
    return self._argument_type in const.VALID_SCALAR_NAMES
5823 
@property
def is_field(self):
    '''
    :returns: True if this kernel argument represents a field, \
        False otherwise.
    :rtype: bool
    '''
    const = LFRicConstants()
    return self._argument_type in const.VALID_FIELD_NAMES
5833 
@property
def is_operator(self):
    '''
    :returns: True if this kernel argument represents an operator, \
        False otherwise.
    :rtype: bool
    '''
    const = LFRicConstants()
    return self._argument_type in const.VALID_OPERATOR_NAMES
5843 
@property
def descriptor(self):
    '''
    :returns: a descriptor object which contains Kernel metadata \
        about this argument.
    :rtype: :py:class:`psyclone.domain.lfric.LFRicArgDescriptor`
    '''
    return self._arg
5852 
@property
def argument_type(self):
    '''
    :returns: the API type of this argument, as specified in \
        the metadata.
    :rtype: str
    '''
    return self._argument_type
5861 
@property
def intrinsic_type(self):
    '''
    :returns: the intrinsic Fortran type of this argument for scalars \
        or of the argument's data for fields and operators.
    :rtype: str
    '''
    return self._intrinsic_type
5870 
@property
def mesh(self):
    '''
    :returns: mesh associated with argument ('GH_FINE' or 'GH_COARSE').
    :rtype: str
    '''
    return self._mesh
5878 
@property
def vector_size(self):
    '''
    :returns: the vector size of this argument as specified in \
        the Kernel metadata.
    :rtype: str
    '''
    return self._vector_size
5887 
@property
def name_indexed(self):
    '''
    :returns: the name for this argument with an additional index \
        which accesses the first element for a vector argument.
    :rtype: str
    '''
    # A vector argument is accessed via its first component.
    if self._vector_size > 1:
        return self._name+"(1)"
    return self._name
5898 
def psyir_expression(self):
    '''
    Looks up or creates a reference to a suitable Symbol for this kernel
    argument. If the argument is a scalar that has been provided as a
    literal (in the Algorithm layer) then the PSyIR of the expression
    is returned.

    :returns: the PSyIR for this kernel argument.
    :rtype: :py:class:`psyclone.psyir.nodes.Node`

    :raises InternalError: if this argument is a literal but we fail to \
        construct PSyIR that is consistent with this.
    :raises NotImplementedError: if this argument is not a literal, scalar
        or field.

    '''
    symbol_table = self._call.scope.symbol_table

    if self.is_literal:
        reader = FortranReader()
        if self.precision:
            # Ensure any associated precision symbol is in the table.
            symbol_table.add_lfric_precision_symbol(self.precision)
        lit = reader.psyir_from_expression(self.name, symbol_table)

        # Sanity check that the resulting expression is a literal.
        if lit.walk(Reference):
            raise InternalError(
                f"Expected argument '{self.name}' to kernel "
                f"'{self.call.name}' to be a literal but the created "
                f"PSyIR contains one or more References.")
        return lit

    if self.is_scalar:
        try:
            scalar_sym = symbol_table.lookup(self.name)
        except KeyError:
            # TODO once #1258 is done the symbols should already exist
            # and therefore we should raise an exception if not.
            scalar_sym = symbol_table.new_symbol(
                self.name, symbol_type=DataSymbol,
                datatype=self.infer_datatype())
        return Reference(scalar_sym)

    const = LFRicConstants()
    try:
        # Fields and operators are looked up via their tagged proxy
        # symbol (e.g. "<name>:data").
        suffix = const.ARG_TYPE_SUFFIX_MAPPING[self.argument_type]
        tag_name = f"{self.name}:{suffix}"
        sym = symbol_table.lookup_with_tag(tag_name)
        return Reference(sym)

    except KeyError as err:
        raise NotImplementedError(
            f"Unsupported kernel argument type: '{self.name}' is of type "
            f"'{self.argument_type}' which is not recognised as being a "
            f"literal, scalar or field.") from err
5955 
@property
def declaration_name(self):
    '''
    :returns: the name for this argument with the array dimensions \
        added if required.
    :rtype: str
    '''
    # A vector argument is declared as an array of size _vector_size.
    if self._vector_size > 1:
        return self._name+"("+str(self._vector_size)+")"
    return self._name
5966 
@property
def proxy_name(self):
    '''
    :returns: the proxy name for this argument.
    :rtype: str
    '''
    return self._name+"_proxy"
5974 
@property
def proxy_name_indexed(self):
    '''
    :returns: the proxy name for this argument with an additional \
        index which accesses the first element for a vector argument.
    :rtype: str
    '''
    # A vector argument's proxy is accessed via its first component.
    if self._vector_size > 1:
        return self._name+"_proxy(1)"
    return self._name+"_proxy"
5986 
@property
def proxy_declaration_name(self):
    '''
    :returns: the proxy name for this argument with the array \
        dimensions added if required.
    :rtype: str
    '''
    # A vector argument's proxy is declared as an array of size
    # _vector_size.
    if self._vector_size > 1:
        return self.proxy_name+"("+str(self._vector_size)+")"
    return self.proxy_name
5997 
@property
def proxy_data_type(self):
    '''
    :returns: the type of this argument's proxy (if it exists) as \
        defined in LFRic infrastructure.
    :rtype: str or NoneType

    '''
    return self._proxy_data_type
6007 
@property
def function_space(self):
    '''
    Returns the expected finite element function space for a kernel
    argument as specified by the kernel argument metadata: a single
    function space for a field and function_space_from for an operator.

    :returns: function space for this argument.
    :rtype: :py:class:`psyclone.domain.lfric.FunctionSpace`
    '''
    if self._argument_type == "gh_operator":
        # We return the 'from' space for an operator argument
        return self.function_space_from
    return self._function_spaces[0]
6022 
@property
def function_space_to(self):
    '''
    :returns: the 'to' function space of an operator.
    :rtype: str
    '''
    # By construction (__init__), index 0 holds the 'to' space.
    return self._function_spaces[0]
6030 
@property
def function_space_from(self):
    '''
    :returns: the 'from' function space of an operator.
    :rtype: str
    '''
    # By construction (__init__), index 1 holds the 'from' space.
    return self._function_spaces[1]
6038 
@property
def function_spaces(self):
    '''
    Returns the expected finite element function space for a kernel
    argument as specified by the kernel argument metadata: a single
    function space for a field and a list containing
    function_space_to and function_space_from for an operator.

    :returns: function space(s) for this argument.
    :rtype: list of :py:class:`psyclone.domain.lfric.FunctionSpace`

    '''
    return self._function_spaces
6052 
@property
def function_space_names(self):
    '''
    Returns a list of the names of the function spaces associated
    with this argument. We have more than one function space when
    dealing with operators.

    :returns: list of function space names for this argument.
    :rtype: list of str

    '''
    fs_names = []
    # Skip the None entry that a field argument has in slot 1.
    for fspace in self._function_spaces:
        if fspace:
            fs_names.append(fspace.orig_name)
    return fs_names
6069 
@property
def intent(self):
    '''
    Returns the Fortran intent of this argument as defined by the
    valid access types for this API.

    :returns: the expected Fortran intent for this argument as \
        specified by the kernel argument metadata.
    :rtype: str

    :raises GenerationError: if the access type is neither the pure \
        read access nor one of the write accesses.

    '''
    write_accesses = AccessType.all_write_accesses()
    if self.access == AccessType.READ:
        return "in"
    if self.access in write_accesses:
        # Note: all write accesses (including e.g. increments) map to
        # "inout" here.
        return "inout"
    # An argument access other than the pure "read" or one of
    # the "write" accesses is invalid
    valid_accesses = [AccessType.READ.api_specific_name()] + \
        [access.api_specific_name() for access in write_accesses]
    raise GenerationError(
        f"In the LFRic API the argument access must be one of "
        f"{valid_accesses}, but found '{self.access}'.")
6093 
@property
def discontinuous(self):
    '''
    Returns True if this argument is known to be on a discontinuous
    function space including any_discontinuous_space, otherwise
    returns False.

    :returns: whether the argument is discontinuous.
    :rtype: bool

    '''
    const = LFRicConstants()
    if self.function_space.orig_name in \
            const.VALID_DISCONTINUOUS_NAMES:
        return True
    if self.function_space.orig_name in \
            const.VALID_ANY_SPACE_NAMES:
        # We will eventually look this up based on our dependence
        # analysis but for the moment we assume the worst
        return False
    return False
6115 
@property
def stencil(self):
    '''
    :returns: stencil information for this argument if it exists.
    :rtype: :py:class:`psyclone.dynamo0p3.LFRicArgStencil`
    '''
    return self._stencil

@stencil.setter
def stencil(self, value):
    '''
    Sets stencil information for this kernel argument.

    :param value: stencil information for this argument.
    :type value: :py:class:`psyclone.dynamo0p3.LFRicArgStencil`

    '''
    self._stencil = value
6134 
def infer_datatype(self, proxy=False):
    '''
    Infer the datatype of this kernel argument in the PSy layer using
    the LFRic API rules. If any LFRic infrastructure modules are required
    but are not already present then suitable ContainerSymbols are added
    to the outermost symbol table. Similarly, DataTypeSymbols are added for
    any required LFRic derived types that are not already in the symbol
    table.

    TODO #1258 - ultimately this routine should not have to create any
    DataTypeSymbols as that should already have been done.

    :param bool proxy: whether or not we want the type of the proxy \
        object for this kernel argument. Defaults to False (i.e. \
        return the type rather than the proxy type).

    :returns: the datatype of this argument.
    :rtype: :py:class:`psyclone.psyir.symbols.DataType`

    :raises NotImplementedError: if an unsupported argument type is found.

    '''
    # We want to put any Container symbols in the outermost scope so find
    # the corresponding symbol table.
    symbol_table = self._call.scope.symbol_table
    root_table = symbol_table
    while root_table.parent_symbol_table():
        root_table = root_table.parent_symbol_table()

    def _find_or_create_type(mod_name, type_name):
        '''
        Utility to find or create a DataTypeSymbol with the supplied name,
        imported from the named module.

        :param str mod_name: the name of the module from which the \
            DataTypeSymbol should be imported.
        :param str type_name: the name of the derived type for which to \
            create a DataTypeSymbol.

        :returns: the symbol for the requested type.
        :rtype: :py:class:`psyclone.psyir.symbols.DataTypeSymbol`

        '''
        return root_table.find_or_create(
            type_name,
            symbol_type=DataTypeSymbol,
            datatype=UnresolvedType(),
            interface=ImportInterface(root_table.find_or_create(
                mod_name,
                symbol_type=ContainerSymbol)
            ))

    if self.is_scalar:
        # Find or create the DataType for the appropriate scalar type.
        if self.intrinsic_type == "real":
            prim_type = ScalarType.Intrinsic.REAL
        elif self.intrinsic_type == "integer":
            prim_type = ScalarType.Intrinsic.INTEGER
        elif self.intrinsic_type == "logical":
            prim_type = ScalarType.Intrinsic.BOOLEAN
        else:
            raise NotImplementedError(
                f"Unsupported scalar type '{self.intrinsic_type}'")

        kind_name = self.precision
        try:
            kind_symbol = symbol_table.lookup(kind_name)
        except KeyError:
            mod_map = LFRicConstants().UTILITIES_MOD_MAP
            const_mod = mod_map["constants"]["module"]
            try:
                constants_container = symbol_table.lookup(const_mod)
            except KeyError:
                # TODO Once #696 is done, we should *always* have a
                # symbol for this container at this point so should
                # raise an exception if we haven't.
                constants_container = LFRicTypes(const_mod)
                root_table.add(constants_container)
            kind_symbol = DataSymbol(
                kind_name, INTEGER_TYPE,
                interface=ImportInterface(constants_container))
            root_table.add(kind_symbol)
        return ScalarType(prim_type, kind_symbol)

    if self.is_field or self.is_operator:
        # Find or create the DataTypeSymbol for the appropriate
        # field or operator type.
        mod_name = self._module_name
        if proxy:
            type_name = self._proxy_data_type
        else:
            type_name = self._data_type
        return _find_or_create_type(mod_name, type_name)

    raise NotImplementedError(
        f"'{str(self)}' is not a scalar, field or operator argument")
6231 
6232 
class DynACCEnterDataDirective(ACCEnterDataDirective):
    '''
    Sub-classes ACCEnterDataDirective to provide an API-specific
    implementation of data_on_device().

    '''
    def data_on_device(self, _):
        '''
        Provide a hook to be able to add information about data being on a
        device (or not). This is currently not used in dynamo0p3.

        :returns: None, since this hook is not implemented for this API.
        :rtype: NoneType

        '''
        return None
6246 
6247 
# ---------- Documentation utils -------------------------------------------- #
# The list of module members that we wish AutoAPI to generate
# documentation for. (See https://psyclone-ref.readthedocs.io)
__all__ = [
    'DynFuncDescriptor03',
    'DynamoPSy',
    'DynFunctionSpaces',
    'DynProxies',
    'DynCellIterators',
    'DynLMAOperators',
    'DynCMAOperators',
    'DynMeshes',
    'DynInterGrid',
    'DynBasisFunctions',
    'DynBoundaryConditions',
    'DynInvokeSchedule',
    'DynGlobalSum',
    'LFRicHaloExchange',
    'LFRicHaloExchangeStart',
    'LFRicHaloExchangeEnd',
    'HaloDepth',
    'HaloWriteAccess',
    'HaloReadAccess',
    'FSDescriptor',
    'FSDescriptors',
    'LFRicArgStencil',
    'DynKernelArguments',
    'DynKernelArgument',
    'DynACCEnterDataDirective']
def basis_first_dim_value(function_space)
Definition: dynamo0p3.py:2729
def _compute_basis_fns(self, parent)
Definition: dynamo0p3.py:3386
def _initialise_xoyoz_qr(self, parent)
Definition: dynamo0p3.py:3294
def diff_basis_first_dim_name(function_space)
Definition: dynamo0p3.py:2758
def _setup_basis_fns_for_call(self, call)
Definition: dynamo0p3.py:2806
def basis_first_dim_name(function_space)
Definition: dynamo0p3.py:2715
def diff_basis_first_dim_value(function_space)
Definition: dynamo0p3.py:2773
def _initialise_xyz_qr(self, parent)
Definition: dynamo0p3.py:3220
def _initialise_face_or_edge_qr(self, parent, qr_type)
Definition: dynamo0p3.py:3308
def _initialise_xyoz_qr(self, parent)
Definition: dynamo0p3.py:3234
def gen_code(self, parent)
Definition: dynamo0p3.py:3706
def set_colour_info(self, colour_map, ncolours, last_cell)
Definition: dynamo0p3.py:2592
def node_str(self, colour=True)
Definition: dynamo0p3.py:3656
def _init_data_type_properties(self, arg_info, check=True)
Definition: dynamo0p3.py:5536
def _init_scalar_properties(self, alg_datatype, alg_precision, check=True)
Definition: dynamo0p3.py:5578
def infer_datatype(self, proxy=False)
Definition: dynamo0p3.py:6135
def _init_operator_properties(self, alg_datatype, check=True)
Definition: dynamo0p3.py:5752
def ref_name(self, function_space=None)
Definition: dynamo0p3.py:5476
def _init_field_properties(self, alg_datatype, check=True)
Definition: dynamo0p3.py:5681
def get_arg_on_space(self, func_space)
Definition: dynamo0p3.py:5216
def get_arg_on_space_name(self, func_space_name)
Definition: dynamo0p3.py:5191
def has_operator(self, op_type=None)
Definition: dynamo0p3.py:5241
def initialise(self, parent)
Definition: dynamo0p3.py:2360
def _add_mesh_symbols(self, mesh_tags)
Definition: dynamo0p3.py:2120
def declarations(self, parent)
Definition: dynamo0p3.py:2246
def _add_symbol(self, name, tag, intrinsic_type, arg, rank)
Definition: dynamo0p3.py:1373
def initialise(self, parent)
Definition: dynamo0p3.py:1550
def get_descriptor(self, fspace)
Definition: dynamo0p3.py:4990
def set_by_value(self, max_depth, var_depth, literal_depth, annexed_only, max_depth_m1)
Definition: dynamo0p3.py:4560
def literal_depth(self, value)
Definition: dynamo0p3.py:4551
def _compute_from_field(self, field)
Definition: dynamo0p3.py:4807
def _compute_from_field(self, field)
Definition: dynamo0p3.py:4708
def _compute_halo_read_info(self, ignore_hex_dep=False)
Definition: dynamo0p3.py:3950
def node_str(self, colour=True)
Definition: dynamo0p3.py:4215
def _compute_halo_read_depth_info(self, ignore_hex_dep=False)
Definition: dynamo0p3.py:3922
def required(self, ignore_hex_dep=False)
Definition: dynamo0p3.py:4040
def kern_args(self, stub=False, var_accesses=None, kern_call_arg_list=None)
Definition: dynamo0p3.py:548
def access(self, value)
Definition: psyGen.py:2282
def intrinsic_type(self)
Definition: psyGen.py:2312
def infer_datatype(self)
Definition: psyGen.py:2247
def _complete_init(self, arg_info)
Definition: psyGen.py:2183
def argument_type(self)
Definition: psyGen.py:2298
def append(self, name, argument_type)
Definition: psyGen.py:1978
def invoke(self, my_invoke)
Definition: psyGen.py:748
def name(self)
Definition: psyGen.py:282
def invokes(self)
Definition: psyGen.py:275
def container(self)
Definition: psyGen.py:264